diff --git "a/3657.jsonl" "b/3657.jsonl" new file mode 100644--- /dev/null +++ "b/3657.jsonl" @@ -0,0 +1,666 @@ +{"seq_id":"107387634","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom program.models import Program\nfrom vote.models import Vote, vote_types\n\nimport json\nimport os\n\nkey_func_mapping = {\n \"new\": \"created\",\n \"top\": lambda program: sum([getattr(program, t + \"_votes\") for t in vote_types])\n}\nfor t in vote_types:\n key_func_mapping[t] = t + \"_votes\"\n\n# Create your views here.\ndef program (request, program_id):\n data_dict = {}\n\n if program_id == \"new\":\n data_dict[\"new\"] = True\n data_dict[\"canEditProgram\"] = True\n\n with open(os.path.join(os.path.dirname(__file__), 'default.json')) as data_file:\n data_dict.update(json.load(data_file))\n else:\n try:\n current_program = Program.objects.select_related('user').get(program_id=program_id)\n except Program.DoesNotExist:\n return render(request, \"program/404.html\", status=404)\n\n data_dict = current_program.to_dict()\n data_dict[\"canEditProgram\"] = (current_program.user == request.user)\n data_dict[\"hasVoted\"] = dict([(t, bool(Vote.objects.filter(vote_type=t, voted_object_id=program_id, user_id=request.user.id).count())) for t in vote_types])\n\n return render(request, \"program/index.html\", {\"data_dict\": json.dumps(data_dict)})\n\ndef program_list (request, sort):\n if (not sort):\n sort = \"new\" # Default sort. sort is actually passed in as None, so we can't use an argument default\n\n if (sort not in key_func_mapping):\n return redirect(\"/programs\")\n\n key_func = key_func_mapping[sort]\n if (type(key_func) is unicode):\n key_func = lambda program: getattr(program, key_func_mapping[sort])\n\n programs = sorted(Program.objects.all(), reverse=True, key=key_func)[:20]\n\n return render(request, \"program/list.html\", {\"programs\": programs})\n\ncontent_types = {\n \"html\": \"text/plain\", # Don't want text/html, because that would be served as a webpage\n \"css\": \"text/css\",\n \"js\": \"application/javascript\"\n}\n\ndef program_file (request, program_id, file_type):\n try:\n program = Program.objects.get(program_id=program_id)\n except Program.DoesNotExist:\n return HttpResponse(\"404: No program found with that id\", status=404)\n\n return HttpResponse(getattr(program, file_type), content_type=content_types[file_type])\n\ndef fullscreen (request, program_id):\n try:\n program = Program.objects.get(program_id=program_id)\n except Program.DoesNotExist:\n return render(request, \"program/404.html\", status=404)\n\n data_dict = {\n \"id\": program.program_id,\n\n \"js\": program.js,\n \"html\": program.html,\n \"css\": program.css,\n \"title\": program.title,\n }\n\n return render(request, \"program/fullscreen.html\", { \"data_dict\": json.dumps(data_dict) })\n","sub_path":"django_code/program/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"605750453","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nN = 100\nD = 2\n\nX = np.random.randn(N, D)\n\n# center the first 50 points at (-2, -2)\nX[:50, :] = X[:50, :] - 2 * np.ones((50, D))\n\n# center the last 50 points at (2, 2)\nX[50:, :] = X[50:, :] + 2 * np.ones((50, D))\n\n# labels: first 50 are 0, last 50 are 1\nT = np.array([0] * 50 + [1] * 50)\n\n# add a column of ones for bias 
term\n# ones = np.array([[1] * N]).T # old\nones = np.ones((N, 1))\nXb = np.concatenate((ones, X), axis = 1)\n\ndef sigmoid(z):\n\treturn 1/(1 + np.exp(-z))\n\nw = np.array([0, 4, 4])\n\nz = Xb.dot(w)\nY = sigmoid(z)\n\nplt.scatter(np.array([1, 2, 3, 4, 5, 6]), np.array([6, 5, 4, 3, 2, 1]))\nplt.show()\n\nplt.scatter(X[:, 0], X[:, 1], c = T, s = 100, alpha = 0.5)\n\nx_axis = np.linspace(-6, 6, 100)\ny_axis = -x_axis\nplt.plot(x_axis, y_axis)\nplt.show()\n\nprint(Y)\nplt.scatter(Y, T, c = T, s = 100, alpha = 0.5)\nplt.show()","sub_path":"logistic_regression/logistic_visualize.py","file_name":"logistic_visualize.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"167218298","text":"# Drone Programming\r\n# PlanetBravo\r\n# Lily Stone\r\n\r\n# Import the necessary modules\r\nimport threading \r\nimport socket\r\nimport time \r\n\r\n\r\n# IP and port of Tello\r\ntello_address = ('192.168.10.1', 8889)\r\n\r\n# IP and port of local computer\r\nphone = ''\r\nport = 9000\r\nlocaladdress = (phone,port)\r\n\r\n# Create a UDP socket\r\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\r\n# Bind to the local address and port\r\nsock.bind(localaddress)\r\n\r\n\r\n# Send the message to Tello\r\ndef send(message):\r\n try:\r\n sock.sendto(message.encode(), tello_address)\r\n print (\"Sending message: \" + message)\r\n except Exception as e:\r\n print (\"Error sending, Drone says: \" + str(e))\r\n \r\n\r\n\r\n# Receive the message from Tello\r\ndef recv():\r\n # Continuously loop and listen for incoming messages\r\n while True:\r\n # Try to receive the message. If there is a problem print the exception.\r\n try:\r\n response, drone = sock.recvfrom(1518)\r\n print(\"Drone says: \" + response.decode(encoding=\"utf-8\"))\r\n except Exception as e:\r\n sock.close()\r\n print (\"Error receiving, Drone says: \" + str(e))\r\n break\r\n\r\n# Create and start a listening thread that runs in the background\r\n# This utilizes our receive functions and will continuously monitor for incoming\r\nrecvThread = threading.Thread(target=recv)\r\nrecvThread.daemon = True\r\nrecvThread.start()\r\n\r\n# Give your drone commands by placing code below.\r\nsend (\"command\")\r\ntime.sleep(8)\r\n\r\nsend (\"takeoff\")\r\ntime.sleep(8)\r\n\r\nsend (\"up \" + str(50))\r\ntime.sleep(8)\r\n\r\nsend (\"forward \" + str(100))\r\ntime.sleep(8)\r\n\r\nsend (\"back \" + str(50))\r\ntime.sleep(8)\r\n\r\nsend (\"down \" + str(50))\r\ntime.sleep(8)\r\n\r\nsend (\"cw \" + str(360))\r\ntime.sleep(8)\r\n\r\nfor i in range(2):\r\n send (\"flip f \")\r\n time.sleep(8)\r\n \r\n send (\"forward \" + str(30))\r\n time.sleep(8)\r\n \r\n send (\"flip r \")\r\n time.sleep(8)\r\n \r\n send (\"right \" + str(30))\r\n time.sleep(8)\r\n \r\n send (\"flip b \")\r\n time.sleep(8)\r\n \r\n send (\"back \" + str(30))\r\n time.sleep(8)\r\n \r\n send (\"flip l \")\r\n time.sleep(8)\r\n \r\nsend (\"ccw \" + str(360))\r\ntime.sleep(8)\r\n\r\nsend (\"land\") \r\n\r\n\r\n# Report mission status\r\nprint(\"Mission completed successfully!\")\r\n \r\n#Clean-up your open connections by closing them. 
\r\nsock.close()","sub_path":"drone_program_lily.py","file_name":"drone_program_lily.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"265997336","text":"import sys\n#\n# >>> Escriba el codigo del mapper a partir de este punto <<<\n#\nif __name__ == '__main__':\n \n conteo = 0\n\n for line in sys.stdin:\n line = line.strip()\n val3, val2, key, val1 = line.split(\"\\t\")\n sys.stdout.write(\"{}\\t{}\\t{}\\n\".format(key, val1, val2))\n conteo += 1\n \n if conteo == 6:\n break","sub_path":"01-hadoop-50/q09-10/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"115468444","text":"import random\n\nwith open('vocab_src', 'w') as fo:\n for i in range(5):\n fo.write(str(i)+'\\n')\n\nwith open('vocab_tgt', 'w') as fo:\n for i in range(5):\n fo.write(str(i)+'\\n')\n\nmapping = range(5)\nrandom.shuffle(mapping)\n\ndef get_instance():\n def toString(x):\n return ' '.join(str(i) for i in x)\n _len = random.randint(3, 5)\n src = [random.randint(0, 4) for j in range(_len)]\n\n tgt = [ mapping[x] for x in src]\n\n return '|'.join([toString(i) for i in [src, tgt]])\n\nwith open('train', 'w') as fo:\n for i in range(10000):\n fo.write( get_instance()+'\\n')\n\nwith open('dev', 'w') as fo:\n for i in range(1000):\n fo.write( get_instance()+'\\n')\n","sub_path":"ranker/toy/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"278114048","text":"import cv2\nfrom prediction import Prediccion\nimport numpy as np\nimport base64\n\nf = open(r\"C:\\Users\\G40\\Develop\\Python\\Proyecto_SI2\\app\\imageBase64_8.txt\", \"rt\", encoding=\"utf8\")\nstrImage = f.read()\nf.close()\n\nreconocimiento = Prediccion()\n# imagenPrueba = cv2.imread(\"test/8/8_8_58.jpg\", 0)\nimagenPruebaDecoded = base64.b64decode(strImage)\nnp_data = np.fromstring(imagenPruebaDecoded, np.uint8)\nimg = cv2.imdecode(np_data, cv2.IMREAD_GRAYSCALE)\nindiceCategoria = reconocimiento.predecir(\"123\", img)\nprint(indiceCategoria)\n# print(\"La imamgen cargada es \", categorias[indiceCategoria])\n\n## Mostrar Imagen \n\nwhile True:\n cv2.imshow(\"test imagen\",img)\n k = cv2.waitKey(30) & 0xff\n if k == 27:\n break\ncv2.destroyAllWindows()","sub_path":"app/test_base64/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"272605855","text":"\"\"\"\nversion.py - Willie Version Module\nCopyright 2009, Silas Baronda\nLicensed under the Eiffel Forum License 2.\n\nhttp://willie.dftba.net\n\"\"\"\n\nfrom datetime import datetime\nfrom subprocess import Popen, PIPE\nimport willie\n\n\ndef git_info():\n p = Popen([\"git\", \"log\", \"-n 1\"], stdout=PIPE, close_fds=True)\n\n commit = p.stdout.readline()\n author = p.stdout.readline()\n date = p.stdout.readline()\n return commit, author, date\n\n\n@willie.module.commands('version')\ndef version(bot, trigger):\n \"\"\"Display the latest commit version, if Willie is running in a git repo.\"\"\"\n commit, author, date = git_info()\n\n if not commit.strip():\n bot.reply(\"Willie v. \" + willie.__version__)\n return\n\n bot.say(str(trigger.nick) + \": Willie v. 
%s at commit:\" % willie.__version__)\n bot.say(\" \" + commit)\n bot.say(\" \" + author)\n bot.say(\" \" + date)\n\n\n@willie.module.rule('\\x01VERSION\\x01')\n@willie.module.rate(20)\ndef ctcp_version(bot, trigger):\n bot.write(('NOTICE', trigger.nick),\n '\\x01VERSION Willie IRC Bot version %s\\x01' % willie.__version__)\n\n\n@willie.module.rule('\\x01SOURCE\\x01')\n@willie.module.rate(20)\ndef ctcp_source(bot, trigger):\n bot.write(('NOTICE', trigger.nick),\n '\\x01SOURCE https://github.com/Embolalia/willie/\\x01')\n\n\n@willie.module.rule('\\x01PING\\s(.*)\\x01')\n@willie.module.rate(10)\ndef ctcp_ping(bot, trigger):\n text = trigger.group()\n text = text.replace(\"PING \", \"\")\n text = text.replace(\"\\x01\", \"\")\n bot.write(('NOTICE', trigger.nick),\n '\\x01PING {0}\\x01'.format(text))\n\n\n@willie.module.rule('\\x01TIME\\x01')\n@willie.module.rate(20)\ndef ctcp_time(bot, trigger):\n dt = datetime.now()\n current_time = dt.strftime(\"%A, %d. %B %Y %I:%M%p\")\n bot.write(('NOTICE', trigger.nick),\n '\\x01TIME {0}\\x01'.format(current_time))\n","sub_path":"IRC Bot/willie-4.1.0/willie/modules/version.py","file_name":"version.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"252124675","text":"#!/usr/bin/env python3\n\nfrom tkinter import scrolledtext as st\nimport tkinter as tk\nimport platform\nimport random\nimport os\n\nclass Ewa:\n\t'Bot class for bot that acts like Ewa'\n\t#variables\n\tname = \"Ewa\"\n\ttitle = \"Schwesta Ewa\"\n\thello = \"Hee\"\n\n\t#os dependent variables\n\tentry_width_unix = 55\n\tsend_width_unix = 10\n\tentry_width_win = 70\n\tsend_width_win = 18\n\n\t#width defaults\n\tentry_width = 55\n\tsend_width = 10\n\n\t#answer-set\n\tanswers = list(range(15))\n\tanswers[0]=\"Na moch i sicha nicht meine goschn haltn weil ich meine gosche nich \\nhalten tu weil ich gern meine gosche nicht halte u deswegn halt i nd \\nmei gosche weil ich mei gosche nicht halten werden moechten wollen \\nwill!\";\n\tanswers[1]=\"Paaaaaassst du schoffst dssss \\nI believe in u \\nI believe i can fly too \\nFUck \\nAltha \\nI believe i can touch de sky \\nWATn los hier \\nAh jz gehts wida xd \\nLol swag yolo hipster \\nHopster\";\n\tanswers[2]=\"Chek i moi wida nd...owa lustig find is trotzdem xD\";\n\tanswers[3]=\"Thenks mather for my live \\nAnd soeri for mei inglish\";\n\tanswers[4]=\"Na owa echt wer wue den scho kinda hobn? Ma is 9 monat fett, zerstoert \\nsi sei vagina , nur um iwos ind welt zu setzn ds stinkt, kotzt, reat, \\nlaut u deppad is u iwon undonkbor und di ala lost? XD\";\n\tanswers[5]=\"Hahahahahaha fuck bist du a spast xd echt jz? Du bist bullshit :p\";\n\tanswers[6]=\"Njo schau dan kannst doch nd alles oiso konnst nd sogn dasd alles \\nkonnst wenn dem nd so is da du nd alles sondern nur fast alles konnst \\nu ds an fettn unterschied mocht wennma sogt ma kan alles u ma aber \\nnur fast alles wirkli kann!\";\n\tanswers[7]=\"Kumm i do am schnellsten mim pegasus ueber in klammeraffenberg?\";\n\tanswers[8]=\"Is ds nd irgend a wort auf jidisch? XD\";\n\tanswers[9]=\"Hahahahaha best thing ewa\";\n\tanswers[10]=\"I bin di perfekte hausfrau...\";\n\tanswers[11]=\"Homo4ewa\";\n\tanswers[12]=\"Vadommt! 
Bitch.\";\n\tanswers[13]=\"Spast xD\";\n\tanswers[14]=\"In deine schuhe gekackt ich habe\";\n\t\n\t#constructor\n\tdef __init__(self, master):\n\t\tself.master = master\n\t\tmaster.title(self.title)\n\n\t\tif \"Windows\" in platform.system():\n\t\t\tself.entry_width = self.entry_width_win\n\t\t\tself.send_width = self.send_width_win\n\n\t\tgreeting = self.name+\": \"+self.hello\n\t\tself.avowly = tk.PhotoImage(file=\"avatars/ewa\")\n\t\tself.avatar = tk.Label(self.master, image=self.avowly)\n\t\tself.avatar.image = self.avowly\n\t\tself.avatar.grid(row=0, column=0, rowspan=5)\n\t\tself.owlyChat = st.ScrolledText(self.master, width=80, height=30, state=tk.NORMAL)\n\t\tself.owlyChat.grid(row=1, column=1, rowspan=10, columnspan=4)\n\t\tself.owlyChat.insert(tk.INSERT, greeting)\n\t\tself.owlyChat.config(state=tk.DISABLED)\n\t\tself.owlyChat.see('end')\n\t\tself.owlyEntry = tk.Entry(self.master, width=self.entry_width)\n\t\tself.owlyEntry.grid(row=11, column=1, rowspan=2, columnspan=3)\n\t\tself.owlySend = tk.Button(self.master, text=\"Send!\", command=self.callbackOwly, width=self.send_width)\n\t\tself.owlySend.grid(row=11, column=4, rowspan=2, columnspan=1)\n\n\t\tmaster.bind(\"\", self.callbackOwly)\n\t\tmaster.bind(\"\", self.help)\n\n\t#functions\n\tdef help(self, event=None):\n\t\thelpFile = open(\"bin/system/help/help\", \"r\")\n\t\thelpFileContent = helpFile.read()\n\t\thelpFile.close()\n\n\t\thelp_window = tk.Toplevel()\n\t\thelp_window.title(\"Help\")\n\t\thelpMessage = st.ScrolledText(help_window, width=160, height=40, state=tk.NORMAL)\n\t\thelpMessage.grid(row=0, column=0)\n\t\thelpMessage.insert(tk.INSERT, helpFileContent)\n\t\thelpMessage.config(state=tk.DISABLED)\n\n\t#eventfunction\n\tdef callbackOwly(self, event=None):\n \n\t\tentryText = self.owlyEntry.get()\n\t\tself.owlyEntry.delete(0, \"end\")\n\t\t#answers-start\n\t\n\t\trandomNumber = random.randint(0, 14)\n\n\t\tif \"film\" in entryText:\n\t\t\twriteContentOwly = \">The Ewangers - Mission Micha\"\n\t\telif \"tschuess\" in entryText:\n\t\t\twriteContentOwly = \"Ewa: Kurwa!\"\n\t\telse:\n\t\t\tanswer = \"Ewa: \"+self.answers[randomNumber]\n\t\t\twriteContentOwly = answer\n\t\t\n\t\t#answers-end\n\t\t#ChatFrame Handling\n\t\twriteContentYou = \"\\nDu: \"+entryText\n\t\tself.owlyChat.config(state=tk.NORMAL)\n\t\tself.owlyChat.insert(tk.INSERT, writeContentYou)\n\t\tself.owlyChat.config(state=tk.DISABLED)\n\t\tself.owlyChat.see('end')\n\t\twriteContentMe = \"\\n\"+writeContentOwly\n\t\tself.owlyChat.config(state=tk.NORMAL)\n\t\tself.owlyChat.insert(tk.INSERT, writeContentMe)\n\t\tself.owlyChat.config(state=tk.DISABLED)\n\t\tself.owlyChat.see('end')\n\n#init gui\ndef main():\n\towly = tk.Tk()\n\tapp = Ewa(owly)\n\towly.mainloop()\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"pyBots/ewa.py","file_name":"ewa.py","file_ext":"py","file_size_in_byte":4381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"168223492","text":"import json\nfrom elasticsearch import Elasticsearch\n\n\n\n# pip3 install elasticsearch==7.0.2 -i https://mirrors.aliyun.com/pypi/simple/\n\nes = Elasticsearch(hosts=\"http://elasticsearch:9200/\")\nquery_json = {\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"match_phrase\": {\n \"app_id.keyword\": {\n \"query\": \"9t5s5iaq\"\n }\n }\n },\n {\n \"range\": {\n \"@timestamp\": {\n \"format\": \"strict_date_optional_time\",\n \"gte\": \"2020-04-14T07:00:00.000Z\",\n \"lte\": \"2020-04-14T07:59:59.000Z\"\n }\n }\n }\n ]\n }\n }\n}\n\n\ndef 
export():\n query = es.search(index='new-apm-alive-2020.04.14', body=query_json, scroll='5m', size=100)\n\n results = query['hits']['hits']\n total = query['hits']['total']\n scroll_id = query['_scroll_id']\n\n batch = int(total['value'] / 100) + 1\n for i in range(0, batch):\n query_scroll = es.scroll(scroll_id=scroll_id, scroll='5m')['hits']['hits']\n results += query_scroll\n\n with open('/tmp/data.json', 'w', newline='', encoding='utf-8') as f:\n for res in results:\n json.dump(res['_source'], f)\n f.write('\\n')\n\n print('done!')\n\n\nif __name__ == '__main__':\n export()\n","sub_path":"elk/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"509295194","text":"\nfrom django.urls import path\nfrom . import views #追加\n\n\nurlpatterns = [\n path('',views.refrigerator , name = 'refrigerator'),\n path('food_register',views.food_register , name = 'food_register'),\n path('food_change_select',views.food_change_select , name = 'food_change_select'),\n path('food_search',views.food_search , name = 'food_search'),\n path('food_delete',views.food_delete , name = 'food_delete'),\n path('food_change/',views.food_change , name = 'food_change'),\n]\n","sub_path":"syokuzai_tarou/refrigerator/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"172850804","text":"#! python3\n\n\"\"\"Mission Manager\"\"\"\n\nimport json, traceback, worker\nfrom os import path\nfrom collections import OrderedDict\n\nfrom .safeprint import safeprint\nfrom .config import setting\nfrom .core import Mission, Episode\nfrom .io import content_read, content_write, is_file, backup\n\ndef shallow(dict, exclude=None):\n\t\"\"\"Return a shallow copy of a dict.\n\n\tArguments:\n\texclude - A list of key name which should not to copy. (default: None)\n\t\"\"\"\n\tnew_dict = {}\n\tfor key in dict:\n\t\tif not exclude or key not in exclude:\n\t\t\tnew_dict[key] = dict[key]\n\treturn new_dict\n\nclass MissionPoolEncoder(json.JSONEncoder):\n\t\"\"\"Encode Mission, Episode to json.\"\"\"\n\n\tdef default(self, object):\n\t\tif isinstance(object, Mission):\n\t\t\treturn shallow(vars(object), exclude=[\"module\", \"thread\"])\n\n\t\tif isinstance(object, Episode):\n\t\t\treturn shallow(vars(object))\n\n\t\treturn super().default(object)\n\nclass MissionManager(worker.UserWorker):\n\t\"\"\"Save, load missions from files\"\"\"\n\n\tdef __init__(self):\n\t\t\"\"\"Construct.\"\"\"\n\t\tsuper().__init__()\n\n\t\tself.pool = {}\n\t\tself.view = OrderedDict()\n\t\tself.library = OrderedDict()\n\t\tself.edit = False\n\n\tdef cleanup(self):\n\t\t\"\"\"Cleanup unused missions\"\"\"\n\t\tmain_pool = set(self.pool)\n\t\tview_pool = set(self.view)\n\t\tlibrary_pool = set(self.library)\n\n\t\tfor url in main_pool - (view_pool | library_pool):\n\t\t\tdel self.pool[url]\n\n\tdef worker(self):\n\t\t\"\"\"Override. 
The worker target.\"\"\"\n\t\t@self.listen(\"MISSION_PROPERTY_CHANGED\")\n\t\t@self.listen(\"DOWNLOAD_EP_COMPLETE\")\n\t\tdef dummy():\n\t\t\t\"\"\"Set the edit flag after mission changed.\"\"\"\n\t\t\tself.edit = True\n\n\t\t@self.listen(\"WORKER_DONE\")\n\t\tdef dummy():\n\t\t\t\"\"\"Save missions after the thread terminate.\"\"\"\n\t\t\tself.save()\n\n\t\tself.load()\n\t\twhile True:\n\t\t\tself.wait(setting.getint(\"autosave\", 5) * 60)\n\t\t\tself.save()\n\n\tdef save(self):\n\t\t\"\"\"Save missions to json.\"\"\"\n\t\tif not self.edit:\n\t\t\treturn\n\n\t\tcontent_write(\n\t\t\t\"~/comiccrawler/pool.json\",\n\t\t\tjson.dumps(\n\t\t\t\tlist(self.pool.values()),\n\t\t\t\tcls=MissionPoolEncoder,\n\t\t\t\tindent=4,\n\t\t\t\tensure_ascii=False\n\t\t\t)\n\t\t)\n\t\tcontent_write(\n\t\t\t\"~/comiccrawler/view.json\",\n\t\t\tjson.dumps(\n\t\t\t\tlist(self.view),\n\t\t\t\tindent=4,\n\t\t\t\tensure_ascii=False\n\t\t\t)\n\t\t)\n\t\tcontent_write(\n\t\t\t\"~/comiccrawler/library.json\",\n\t\t\tjson.dumps(\n\t\t\t\tlist(self.library),\n\t\t\t\tindent=4,\n\t\t\t\tensure_ascii=False\n\t\t\t)\n\t\t)\n\t\tself.edit = False\n\t\tsafeprint(\"Session saved\")\n\n\tdef load(self):\n\t\t\"\"\"Load mission from json.\n\n\t\tIf it fail to load missions, create json backup in\n\t\t`~/comiccrawler/invalid-save`.\n\t\t\"\"\"\n\t\ttry:\n\t\t\tself._load()\n\t\texcept Exception as err:\n\t\t\tsafeprint(\"Failed to load session!\")\n\t\t\ttraceback.print_exc()\n\t\t\tbackup(\"~/comiccrawler/*.json\")\n\t\t\tself.bubble(\"MISSION_POOL_LOAD_FAILED\", err)\n\t\tself.cleanup()\n\t\tself.bubble(\"MISSION_POOL_LOAD_SUCCESS\")\n\n\tdef _load(self):\n\t\t\"\"\"Load missions from json. Called by MissionManager.load.\"\"\"\n\t\tpool = []\n\t\tview = []\n\t\tlibrary = []\n\n\t\tif is_file(\"~/comiccrawler/pool.json\"):\n\t\t\tpool = json.loads(content_read(\"~/comiccrawler/pool.json\"))\n\n\t\tif is_file(\"~/comiccrawler/view.json\"):\n\t\t\tview = json.loads(content_read(\"~/comiccrawler/view.json\"))\n\n\t\tif is_file(\"~/comiccrawler/library.json\"):\n\t\t\tlibrary = json.loads(content_read(\"~/comiccrawler/library.json\"))\n\n\t\tfor m_data in pool:\n\t\t\t# reset state\n\t\t\tif m_data[\"state\"] in (\"DOWNLOADING\", \"ANALYZING\"):\n\t\t\t\tm_data[\"state\"] = \"ERROR\"\n\t\t\t# build episodes\n\t\t\tepisodes = []\n\t\t\tfor ep_data in m_data[\"episodes\"]:\n\t\t\t\tepisodes.append(Episode(**ep_data))\n\t\t\tm_data[\"episodes\"] = episodes\n\t\t\tmission = Mission(**m_data)\n\t\t\tself._add(mission)\n\n\t\tfor url in view:\n\t\t\tself.view[url] = self.pool[url]\n\n\t\tfor url in library:\n\t\t\tself.library[url] = self.pool[url]\n\n\t\tself.bubble(\"MISSION_LIST_REARRANGED\", self.view)\n\t\tself.bubble(\"MISSION_LIST_REARRANGED\", self.library)\n\n\tdef _add(self, mission):\n\t\t\"\"\"Add mission to public pool.\"\"\"\n\t\tif mission.url not in self.pool:\n\t\t\tself.add_child(mission)\n\t\t\tself.pool[mission.url] = mission\n\n\tdef add(self, pool_name, *missions):\n\t\t\"\"\"Add missions to pool.\"\"\"\n\t\tpool = getattr(self, pool_name)\n\n\t\tfor mission in missions:\n\t\t\tself._add(mission)\n\t\t\tpool[mission.url] = mission\n\n\t\tself.bubble(\"MISSION_LIST_REARRANGED\", pool)\n\t\tself.edit = True\n\n\tdef remove(self, pool_name, *missions):\n\t\t\"\"\"Remove missions from pool.\"\"\"\n\t\tpool = getattr(self, pool_name)\n\n\t\t# check mission state\n\t\tmissions = [m for m in missions if m.state not in (\"ANALYZING\", \"DOWNLOADING\")]\n\n\t\tfor mission in missions:\n\t\t\tdel 
pool[mission.url]\n\n\t\tself.cleanup()\n\t\tself.bubble(\"MISSION_LIST_REARRANGED\", pool)\n\t\tself.edit = True\n\n\tdef lift(self, pool_name, *missions):\n\t\t\"\"\"Lift missions to the top.\"\"\"\n\t\tpool = getattr(self, pool_name)\n\t\tfor mission in reversed(missions):\n\t\t\tpool.move_to_end(mission.url, last=False)\n\t\tself.bubble(\"MISSION_LIST_REARRANGED\", pool)\n\t\tself.edit = True\n\n\tdef drop(self, pool_name, *missions):\n\t\t\"\"\"Drop missions to the bottom.\"\"\"\n\t\tpool = getattr(self, pool_name)\n\t\tfor mission in missions:\n\t\t\tpool.move_to_end(mission.url)\n\t\tself.bubble(\"MISSION_LIST_REARRANGED\", pool)\n\t\tself.edit = True\n\n\tdef get_by_state(self, pool_name, states, all=False):\n\t\t\"\"\"Get missions by states.\"\"\"\n\t\tif not all:\n\t\t\tfor mission in getattr(self, pool_name).values():\n\t\t\t\tif mission.state in states:\n\t\t\t\t\treturn mission\n\t\t\treturn None\n\t\telse:\n\t\t\toutput = []\n\t\t\tfor mission in getattr(self, pool_name).values():\n\t\t\t\tif mission.state in states:\n\t\t\t\t\toutput.append(mission)\n\t\t\treturn output\n\n\tdef get_by_url(self, url, pool_name=None):\n\t\t\"\"\"Get mission by url.\"\"\"\n\t\tif not pool_name:\n\t\t\treturn self.pool[url]\n\t\treturn getattr(self, pool_name)[url]\n\n","sub_path":"comiccrawler/mission_manager.py","file_name":"mission_manager.py","file_ext":"py","file_size_in_byte":5513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"224777043","text":"import pytest\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\r\n\r\n\r\n@pytest.fixture(params=['chrome', 'firefox'], scope='class')\r\ndef get_driver(request):\r\n if request.param == \"firefox\":\r\n driver = webdriver.Remote('http://selenium:4444/wd/hub', DesiredCapabilities.FIREFOX)\r\n else:\r\n driver = webdriver.Remote('http://selenium:4444/wd/hub', DesiredCapabilities.CHROME)\r\n request.cls.driver = driver\r\n\r\n yield driver\r\n driver.quit()\r\n\r\n","sub_path":"test_project/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"223335302","text":"#coding=utf-8\n\nfrom app.models import Site, Asset\n\nfrom ..same import *\nfrom .forms import AssetForm\nfrom .custom import CustomValidator\n\nnow = \"asset\"\nstart_thead = [\n [0, u'资产编号','asset_tag', False, False], [1, u'设备类型', 'types', False, True], \n [2, u'设备品牌', 'brand', False, True], [3, u'设备型号', 'model', False, True], \n [4, u'设备SN','sn', False, False], [5, u'设备信息', 'info', True, True], \n [6, u'购买日期', 'buy_date', False, True], [7, u'机房', 'site', False, True], \n [8,u'使用人/设备', 'use', False, True], [9, u'开通时间', 'start_date', False, True], \n [10, u'到期时间' ,'expire_date', False, True], [11, u'备注' ,'remark', False, True], \n [12, u'操作', 'setting', True], [13, u'批量处理', 'batch', True]\n]\nendpoint = '.asset_search'\n\n@cmdb.route('/cmdb/asset/search', methods=['GET'])\n@login_required\n@permission_validation(Permission.ADVANCED_QUERY)\ndef asset_search():\n thead = copy.deepcopy(start_thead)\n task_info = ticket_status()\n search_value = request.args.get('search', '')\n checkbox = request.args.getlist('hidden') or request.args.get('hiddens', '')\n \n if search_value:\n thead = init_checkbox(thead, checkbox)\n page = int(request.args.get('page', 1))\n result = search(Asset, 'asset_tag', search_value)\n result = 
result.search_return()\n if result:\n pagination = result.paginate(page, 100, False)\n items = pagination.items\n return render_template(\n 'cmdb/search.html', task_info=task_info, sidebar=sidebar, \n search_value=search_value, checkbox=str(checkbox),\n thead=thead, pagination=pagination, endpoint=endpoint, \n items=items, now=now \n )\n \n return render_template('cmdb/search.html', task_info=task_info, sidebar=sidebar, \n search_value=search_value, thead=thead)\n\n@cmdb.route('/cmdb/asset/add', methods=['GET', 'POST'])\n@login_required\n@permission_validation(Permission.ALTER)\ndef asset_add():\n asset_form = AssetForm()\n asset_form.site.choices = [(site.site, site.site) for site in Site.query.order_by(Site.site.asc()).all()]\n task_info = ticket_status()\n\n if asset_form.validate_on_submit():\n asset = Asset(\n asset_tag = asset_form.asset_tag.data,\n types=asset_form.types.data,\n brand=asset_form.brand.data,\n model=asset_form.model.data,\n sn=asset_form.sn.data,\n info=asset_form.info.data,\n buy_date=asset_form.buy_date.data,\n site=asset_form.site.data,\n use=asset_form.use.data,\n start_date=asset_form.start_date.data,\n expire_date=asset_form.expire_date.data,\n remark=asset_form.remark.data\n )\n add_sql = edit(asset, \"asset_tag\" )\n add_sql.run('add')\n flash(u'销售 *** %s *** 添加成功' % asset_form.asset_tag.data)\n else:\n for thead in start_thead:\n key = thead[2]\n if asset_form.errors.get(key, None):\n flash(asset_form.errors[key][0])\n break\n\n return render_template('cmdb/add.html', task_info=task_info, sidebar=sidebar, \n item_form=asset_form)\n\n@cmdb.route('/cmdb/asset/delete', methods=['POST'])\n@login_required\n@permission_validation(Permission.ALTER)\ndef asset_delete():\n del_id = int(request.form[\"id\"])\n reason = request.form[\"reason\"]\n asset = Asset.query.filter_by(id=del_id).first()\n if asset:\n delete_sql = edit(asset, \"asset_tag\", reason)\n delete_sql.run('delete')\n return \"OK\"\n return u\"删除失败没有找到这个设备\"\n\n@cmdb.route('/cmdb/asset/change', methods=['POST'])\n@login_required\n@permission_validation(Permission.ALTER)\ndef asset_change():\n change_id = int(request.form[\"id\"])\n item = request.form[\"item\"]\n value = request.form['value']\n asset = Asset.query.filter_by(id=change_id).first()\n if asset:\n verify = CustomValidator(asset, item, value)\n result = verify.validate_return()\n if result == \"OK\":\n change_sql = edit(asset, item, value)\n change_sql.run('change')\n return \"OK\"\n return result\n return u\"更改失败没有找到该用户\"\n\n@cmdb.route('/cmdb/asset/batchdelete', methods=['POST'])\n@login_required\n@permission_validation(Permission.ALTER)\ndef asset_batch_delete():\n list_id = eval(request.form[\"list_id\"])\n reason = request.form[\"reason\"]\n for id in list_id:\n asset = Asset.query.filter_by(id=id).first()\n if not asset:\n return u\"删除失败没有这些设备\"\n\n for id in list_id:\n asset = Asset.query.filter_by(id=id).first()\n delete_sql = edit(asset, \"asset_tag\", reason)\n delete_sql.run('delete')\n return \"OK\"\n\n@cmdb.route('/cmdb/asset/batchchange', methods=['POST'])\n@login_required\n@permission_validation(Permission.ALTER)\ndef asset_batch_change():\n list_id = eval(request.form[\"list_id\"])\n item = request.form[\"item\"]\n value = request.form[\"value\"]\n\n for id in list_id:\n asset = Asset.query.filter_by(id=id).first()\n if asset:\n verify = CustomValidator(asset, item, value)\n result = verify.validate_return()\n if not result == \"OK\":\n return result\n else:\n return u\"更改失败没有找到这些设备\"\n\n for id in list_id:\n asset = 
Asset.query.filter_by(id=id).first()\n change_sql = edit(asset, item, value)\n change_sql.run('change')\n return \"OK\"\n","sub_path":"web/app/cmdb/asset/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"260838665","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport re\nfrom datetime import datetime\nfrom scrapy import Request\nfrom scrapy.loader import ItemLoader\nfrom scrapy.exceptions import CloseSpider\n\nfrom rojak_pantau.items import News\nfrom rojak_pantau.i18n import _\nfrom rojak_pantau.util.wib_to_utc import wib_to_utc\nfrom rojak_pantau.spiders.base import BaseSpider\n\nclass PilkadaJabar2018KompasSpider(scrapy.Spider):\n name = \"pilkada_jabar_2018_kompascom\"\n allowed_domains = [\"kompas.com\"]\n start_urls = (\n 'http://indeks.kompas.com/tag/Pilkada-Jabar-2018/desc/1',\n )\n\n def parse(self, response):\n pagination = response.url\n base_url = \"http://kompas.com\"\n self.logger.info('parse: %s' % response)\n\n articles = response.css(\"ul#latest_content > li.box-shadow-new\")\n if not articles:\n raise CloseSpider('articles not found')\n\n for article in articles:\n url_selector = article.css(\"h3 > a::attr(href)\")\n if not url_selector:\n continue\n raise CloseSpider('url_selectors not found')\n url = url_selector.extract_first()\n\n # info_selectors = article.css(\"div.article__list__info > div.article__date::text\")\n # if not info_selectors:\n # continue\n # raise CloseSpider('info_selectors not found')\n # #info = 12 September, 2017 - 15:15\n # info = info_selectors.extract_first()\n #\n # time_arr = filter(None, re.split('[\\s,-]',info))\n # info_time = ' '.join([_(s) for s in time_arr if s])\n #\n # #parse date information\n # try:\n # published_at_wib = datetime.strptime(info_time, '%d %B %Y %H:%M')\n # except ValueError as e:\n # raise CloseSpider('cannot_parse_date: %s' % e)\n #\n # #convert to utc+0\n # published_at = wib_to_utc(published_at_wib)\n\n # #TODO check the last time for scrapping\n #\n\n yield Request(url=url, callback=self.parse_news)\n\n next_page = pagination\n index = int(next_page.rsplit('/', 1)[-1]) + 1\n next_page = next_page.rsplit('/', 1)[-2] + \"/\" + str(index)\n yield Request(next_page, callback=self.parse)\n\n def parse_news(self, response):\n self.logger.info('parse_news: %s' % response)\n\n loader = ItemLoader(item=News(), response=response)\n loader.add_value('url', response.url)\n\n #parse title\n title_selectors = response.css('h1.read__title::text')\n if not title_selectors:\n return loader.load_item()\n title = title_selectors.extract_first()\n loader.add_value('title', title)\n\n #parse date\n date_selectors = response.css('div.read__date::text')\n if not date_selectors:\n return loader.load_item()\n date_str = date_selectors.extract()[0]\n\n # eg: Tuesday, 12 September 2017 | 20:21 WIB\n time_arr = filter(None,re.split('[\\s,|]', date_str))[1:-1]\n info_time = ' '.join([_(s) for s in time_arr if s])\n\n #parse date information\n try:\n published_at_wib = datetime.strptime(info_time, '%d %B %Y %H:%M')\n except ValueError as e:\n raise CloseSpider('cannot_parse_date: %s' % e)\n\n #convert to utc+0\n published_at = wib_to_utc(published_at_wib)\n loader.add_value('published_at', published_at)\n\n #parse author name\n author_name_selectors = response.css('div.contentArticle.box-shadow-new > h6::text').extract_first()\n if not author_name_selectors:\n loader.add_value('author_name', 'N/A')\n else:\n 
author_name = author_name_selectors\n loader.add_value('author_name', author_name)\n\n #parse raw content\n raw_content_selectors = response.css('div.contentArticle.box-shadow-new').extract()\n if not raw_content_selectors:\n return loader.load_item()\n raw_content = raw_content_selectors\n loader.add_value('raw_content', raw_content)\n\n return loader.load_item()\n","sub_path":"crawler/rojak_pantau/spiders/pilkada_jabar_2018_kompascom.py","file_name":"pilkada_jabar_2018_kompascom.py","file_ext":"py","file_size_in_byte":4156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"174610776","text":"import numpy as np\nimport cv2 as cv\nimport binocular\nimport Binocular_correct\nimport monocular\n\n# cv.namedWindow(\"left\")\n# cv.namedWindow(\"right\")\n# cv.namedWindow(\"depth\")\n# cv.moveWindow(\"left\", 0, 0)\n# cv.moveWindow(\"right\", 600, 0)\n# cv.createTrackbar(\"num\", \"depth\", 0, 10, lambda x: None)\n# cv.createTrackbar(\"blockSize\", \"depth\", 5, 255, lambda x: None)\n\n\ndef main(boardSize,pathl,pathr,board_distance,path_imagel,path_imager):\n path_imagel = cv.imread(path_imagel)\n path_imager = cv.imread(path_imager)\n # 双目图像校正\n leftMaps, rightMaps, Rl, Rr, Pl, Pr, Q = Binocular_correct.getMaps(boardSize, pathl, pathr,board_distance)\n imagelrmap = cv.remap(path_imagel, leftMaps[0], leftMaps[1], cv.INTER_LANCZOS4, cv.BORDER_CONSTANT, 0)\n imagerrmap = cv.remap(path_imager, rightMaps[0], rightMaps[1], cv.INTER_LANCZOS4, cv.BORDER_CONSTANT, 0)\n\n # 将图片置为灰度图,为StereoBM作准备\n imgL = cv.cvtColor(imagelrmap, cv.COLOR_BGR2GRAY)\n imgR = cv.cvtColor(imagerrmap, cv.COLOR_BGR2GRAY)\n cv.imshow(\"grayL\", imgL)\n cv.imshow(\"grayR\", imgR)\n\n # 制作滑动测试窗口\n cv.namedWindow(\"depth\")\n cv.createTrackbar(\"num\", \"depth\", 0, 10, lambda x: None)\n cv.createTrackbar(\"blockSize\", \"depth\", 5, 255, lambda x: None)\n num = cv.getTrackbarPos(\"num\", \"depth\")\n blockSize = cv.getTrackbarPos(\"blockSize\", \"depth\")\n if blockSize % 2 == 0:\n blockSize += 1\n if blockSize < 5:\n blockSize = 5\n\n\n cv.waitKey(-1)\n\nif __name__==\"__main__\":\n main((6,9),'C:\\\\Users\\\\lieng\\\\OneDrive\\\\Documents\\\\GitHub\\\\Opencv\\\\left','C:\\\\Users\\\\lieng\\\\OneDrive\\\\Documents\\\\GitHub\\\\Opencv\\\\right',10,'C:\\\\Users\\\\lieng\\\\OneDrive\\\\Documents\\\\GitHub\\\\Opencv\\\\left\\\\left01.jpg','C:\\\\Users\\\\lieng\\\\OneDrive\\\\Documents\\\\GitHub\\\\Opencv\\\\right\\\\right01.jpg')","sub_path":"Deep_catch.py","file_name":"Deep_catch.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"158460613","text":"import random\r\nimport operator\r\nfrom queue import PriorityQueue\r\n\r\nimport numpy as np\r\n\r\nimport torch\r\nfrom torch import nn\r\nimport torch.nn.functional as F\r\n\r\nfrom seq2seq.encoder import Encoder\r\nfrom seq2seq.decoder import Decoder\r\n\r\n\r\nclass BeamSearchNode(object):\r\n def __init__(self, hiddenstate, previousNode, wordId, logProb, length):\r\n '''Node object for storing info about trg word\r\n @param hiddenstate:\r\n @param previousNode:\r\n @param wordId:\r\n @param logProb:\r\n @param length:\r\n '''\r\n self.h = hiddenstate\r\n self.prevNode = previousNode\r\n self.wordid = wordId\r\n self.logp = logProb\r\n self.length = length\r\n\r\n def eval(self, alpha=1.0):\r\n reward = 0\r\n #TODO: find a suitable reward function\r\n\r\n return self.logp / float(self.length - 1 + 1e-6) + alpha * reward\r\n\r\n def 
__lt__(self, other):\r\n return self.length < other.length\r\n\r\n def __gt__(self, other):\r\n return self.length > other.length\r\n\r\n\r\nclass Seq2Seq(nn.Module):\r\n def __init__(self, encoder, decoder, src_pad_idx, device):\r\n \"\"\"The seq2seq model for consolidating the Enocder Decoder into a single class\r\n @param encoder (Encoder): Encoder class for encoding sentences \r\n @param decoder (Decoder): Decoder class for decoding sentences\r\n @src_pad_idx (int): the index used for padding\r\n @device (str): The device on which the model needs to run (cuda or cpu)\"\"\"\r\n\r\n super(Seq2Seq, self).__init__()\r\n\r\n self.encoder = encoder\r\n self.decoder = decoder\r\n\r\n self.src_pad_idx = src_pad_idx\r\n\r\n self.device = device\r\n\r\n def create_masks(self, src):\r\n return (src != self.src_pad_idx).permute(1, 0)\r\n\r\n def forward(self, src, src_len, trg, teacher_forcing=0.5):\r\n \"\"\"Forward function for the seq2seq model\r\n @param src (torch tensor): the source sentences\r\n @param src_len (torch tensor): the lengths of the source sentences\r\n @param trg (torch tensor): the target sentences\r\n @param teacher_forcing (float): ratio for using target input for faster training. Needs to be between 0 and 1\r\n returns outputs (torch tensor): the tensor of the predicted target sentence\"\"\"\r\n\r\n # src --> [src len, batch size]\r\n # src_len --> [batch size]\r\n # trg --> [trg len, batch size]\r\n \r\n batch_size = src.shape[1]\r\n trg_len = trg.shape[0]\r\n trg_vocab_size = self.decoder.output_dim\r\n\r\n # tensor to store the output values\r\n outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(self.device)\r\n\r\n encoder_outputs, (hidden, cell) = self.encoder(src, src_len)\r\n\r\n # First token for the trg is always \r\n dec_inputs = trg[0, :]\r\n\r\n masks = self.create_masks(src)\r\n\r\n for t in range(1, trg_len):\r\n output, (hidden, cell), _ = self.decoder(dec_inputs, hidden, cell, encoder_outputs, masks)\r\n\r\n outputs[t] = output\r\n\r\n teacher_force = random.random() < teacher_forcing \r\n \r\n top1 = output.argmax(1)\r\n\r\n dec_inputs = trg[t] if teacher_force else top1\r\n\r\n return outputs\r\n\r\n def predict(self, src, src_len, trg_pad_token, max_len=50):\r\n \"\"\"Predict the decoder after the training is complete\r\n @param src (Pytorch tensor): input sentence for encoder\r\n @param src_len (Pytorch tensor): lneghts of the input sentences\r\n @param trg_pad_token (int): the padding token index for target\"\"\"\r\n\r\n # src --> [src len, batch size]\r\n # src_len --> [batch size]\r\n\r\n with torch.no_grad():\r\n enc_output, (hidden, cell) = self.encoder(src, src_len)\r\n\r\n masks = self.create_masks(src)\r\n\r\n trg_indexes = [[1,] * src.shape[1]] # Target token idx\r\n\r\n for i in range(max_len):\r\n trg_tensor = torch.LongTensor(trg_indexes)[-1, :].to(self.device)\r\n\r\n with torch.no_grad():\r\n output, (hidden, cell), _ = self.decoder(trg_tensor, hidden, cell, enc_output, masks)\r\n\r\n _, pred_token = output.data.topk(1)\r\n\r\n trg_indexes.append([_.item() for _ in pred_token])\r\n\r\n trgs = np.array(trg_indexes)\r\n trgs = [trgs[1:,i] for i in range(trgs.shape[1])]\r\n\r\n return trgs\r\n\r\n def beam_decode(self, src, src_len, trg_sos_token, trg_pad_token, max_len=50):\r\n \"\"\"Using beam search algo for decoding\"\"\"\r\n\r\n beam_width = 10 #TODO: add in params\r\n topk = 1 #TODO: add in params\r\n\r\n decoded_batch = []\r\n\r\n with torch.no_grad():\r\n enc_output, (hidden, cell) = self.encoder(src, src_len)\r\n\r\n masks 
= self.create_masks(src)\r\n\r\n for i in range(src.shape[1]):\r\n dec_hidden = hidden[i,:].unsqueeze(0)\r\n dec_cell = cell[i,:].unsqueeze(0)\r\n encoder_output = enc_output[:,i,:].unsqueeze(1)\r\n\r\n dec_input = torch.LongTensor([1]).to(self.device) # SOS token\r\n\r\n mask = masks[i,:].unsqueeze(0)\r\n\r\n end_nodes = []\r\n number_required = min(topk + 1, topk - len(end_nodes))\r\n\r\n node = BeamSearchNode((dec_hidden, dec_cell), None, dec_input, 0, 1)\r\n nodes = PriorityQueue()\r\n\r\n nodes.put((-node.eval(), node))\r\n qsize = 1\r\n\r\n while True:\r\n if qsize > 100: break # MAX length for beam search\r\n\r\n score, n = nodes.get()\r\n\r\n decoder_input = n.wordid\r\n dec_hidden, dec_cell = n.h\r\n\r\n if n.wordid.item() == 2 and n.prevNode != None:\r\n end_nodes.append((score, n))\r\n \r\n if len(end_nodes) >= number_required:\r\n break\r\n else:\r\n continue\r\n\r\n dec_output, (dec_hidden, dec_cell), _ = self.decoder(decoder_input, dec_hidden, dec_cell, encoder_output, mask)\r\n dec_output = F.log_softmax(dec_output, dim=1)\r\n # dec_output --> [1, output dim]\r\n\r\n score, indexes = torch.topk(dec_output, beam_width)\r\n\r\n for newk in range(beam_width):\r\n decoded_t = indexes[0][newk].view(-1, 1)[0]\r\n sc = score[0][newk].item()\r\n\r\n node = BeamSearchNode((dec_hidden, dec_cell), n, decoded_t, n.logp + sc, n.length + 1)\r\n sc = -node.eval()\r\n nodes.put((sc, node))\r\n\r\n qsize += 1\r\n\r\n\r\n if len(end_nodes) == 0:\r\n end_nodes = [nodes.get() for _ in range(topk)]\r\n\r\n utterances = []\r\n for sc, n in sorted(end_nodes, key=operator.itemgetter(0)):\r\n utterance = []\r\n utterance.append(n.wordid.item())\r\n\r\n # back trace queue\r\n while n.prevNode:\r\n n = n.prevNode\r\n utterance.append(n.wordid.item())\r\n\r\n utterance = utterance[::-1]\r\n utterances.append(utterance)\r\n\r\n decoded_batch.extend(utterances)\r\n\r\n return decoded_batch\r\n\r\n\r\nif __name__ == \"__main__\":\r\n enc = Encoder(10, 5, 3, 3)\r\n dec = Decoder(10, 5, 3, 3)\r\n\r\n input = torch.randint(0, 9, (10, 100))\r\n inp_len = torch.randint(1, 11, (100,))\r\n\r\n out, (hidden, cell) = enc(input, inp_len)\r\n\r\n model = Seq2Seq(enc, dec, -1, 'cpu')\r\n\r\n outputs = model(input, inp_len, input)\r\n\r\n print(outputs.shape)","sub_path":"seq2seq/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"316067585","text":"import cv2 as cv\n\nINPUT_IMAGE = \"./img/img.jpg\"\n\ndef creates_histogram(img, height, width):\n\thist = [0 for x in range(256)]\n\n\t# fills in histogram\n\tfor h in range(height):\n\t\tfor w in range(width):\n\t\t\thist[int(img[h, w])] += 1\n\n\treturn hist\n\ndef remove_pixels(height, width, perc):\n n = round((height * width) * perc)\n removed = 0\n\n # removes from the end of color spectrum\n\n # removes from the beginning of color spectrum\n\n\n\ndef normalizes(img, height, width, oldmax, oldmin):\n\tnewmax = int(img[0, 0]);\n\tnewmin = int(img[0, 0]);\n\n\t# finds max and min values in color spectrum within an image\n\tfor h in range(height):\n\t\tfor w in range(width):\n\t\t\tif int(img[h, w]) > newmax:\n\t\t\t\tnewmax = int(img[h, w])\n\t\t\telif int(img[h, w] < newmin):\n\t\t\t\tnewmin = int(img[h, w])\n\n\n\t# normalize an image using max and min values found\n\tfor h in range(height):\n\t\tfor w in range(width):\n\t\t\timg[h, w] = round(((int(img[h, w]) - newmin)/ float(newmax - newmin)) * (oldmax - oldmin)+0)\n\n\treturn 
img\n\t\n\ndef main ():\n\timg_original = cv.imread(INPUT_IMAGE)\n\timg_norm = cv.imread(INPUT_IMAGE, 0)\n\theight, width, channel = img_original.shape\n\t\n\t#img_norm = normalizes(img_norm, height, width, 0, 255)\n\tcreates_histogram(img_norm, height, width)\n\t#cv.imwrite('./img/01-normalized.jpg', img_norm)\n\n\nmain()\n","sub_path":"T3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"637754922","text":"#Camel Coding Game\n\nprint('Welcome to Camel!')\nprint('You have stolen a camel to make your way across the great Mobi desert.')\nprint('The natives want their camel back and are chasing you down! Survive your desert trek and out run the natives.')\n\ntraveledMiles = 0\nthirst = 0\ncamelTiredness = 0\nnativesTraveled = -20\ncanteenDrinks = 20\nimport random\n\ndone = False \nwhile done == False:\n print('A. Drink from your canteen.')\n print('B. Ahead moderate speed.')\n print('C. Ahead full speed.')\n print('D. Stop for the night.')\n print('E. Status check.')\n print('Q. Quit.')\n user_choice = input('What is your choice? ')\n if user_choice.upper() == 'Q':\n done = True\n elif user_choice.upper() == 'E':\n print('Miles traveled:', traveledMiles)\n print('Drinks in canteen:', canteenDrinks)\n print('The natives are', nativesTraveled, 'miles behind you.')\n elif user_choice.upper() == 'D':\n camelTiredness = 0\n print('The camel is happy!')\n nativesTraveled += random.randint(7, 15)\n elif user_choice.upper() == 'C':\n traveledMiles += random.randint(10, 21)\n print('Miles traveled:', traveledMiles)\n thirst += 1\n camelTiredness += random.randint(1, 4)\n nativesTraveled += random.randint(7, 15)\n elif user_choice.upper() == 'B':\n traveledMiles += random.randint(5, 13)\n print('Miles traveled:', traveledMiles)\n thirst += 1\n camelTiredness += 1\n nativesTraveled += random.randint(7, 15)\n elif user_choice.upper() == 'A':\n if canteenDrinks > 0:\n canteenDrinks -= 1\n print('Drinks in canteen:', canteenDrinks)\n thirst = 0\n else:\n print('Error: no drinks in canteen.')\n\nif thirst > 4:\n print('You are thirsty.')\nelif thirst > 6:\n print('You died of thirst!')\n done = True\nif camelTiredness > 5:\n print('Your camel is getting tired.')\nelif camelTiredness > 8:\n print('Your camel is dead.')\nif nativesTraveled == 0:\n print(\"You've been caught! Game over, try again.\")\n done = True\nelif nativesTraveled <= -15:\n print('The natives are getting close!')\nif traveledMiles == 200:\n print('You won! 
Game over, play again!')\n done = True\n","sub_path":"Lab 04 - Camel/main_program.py","file_name":"main_program.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"155437124","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom aw.LbsTestCase import LbsTestCase\nfrom aw.utils.kpireport.CnrLinearityReport import CnrLinearityReport\n\n\nclass BF_CNR_linearity_L1L5_0001(LbsTestCase):\n\n def __init__(self):\n super(BF_CNR_linearity_L1L5_0001, self).__init__()\n self.TestCaseList = [\"BF_CNR_linearity_L1L5_0001\"]\n\n def setup(self):\n super(BF_CNR_linearity_L1L5_0001, self).setup()\n self.sceneFile = r'D:\\posapp\\Scenarios\\Test Case\\TestCase_For_Report\\Statics_ALL_L1L5\\Statics_ALL_L1L5.scn'\n self.dbm = -120\n self.endDBM = -165\n \n self.setupStep('6700设备初始化')\n self.assertSuc(self.aw_initGss7000())\n \n self.setupStep(\"选择要播放场景\")\n self.assertSuc(self.gss7000.aw_GSS7000SelectScenario(self.sceneFile))\n \n self.setupStep(\"开始播放场景\")\n self.assertSuc(self.gss7000.aw_GSS7000RunScenario())\n \n def BF_CNR_linearity_L1L5_0001(self):\n \n while self.dbm > self.endDBM:\n self.testStep('模拟器信号设置 %sdBm' % str(self.dbm))\n self.assertSuc(self.gss7000.aw_Gss7000SetSignalLevel(self.dbm))\n \n startTime = self.assertSuc(self.gss7000.aw_GSS7000GetCurrentTime())\n \n self.testStep('等待2min')\n self.sleep(120)\n \n endTime = self.assertSuc(self.gss7000.aw_GSS7000GetCurrentTime())\n \n self.testStep('记录测试信息')\n CnrLinearityReport.getInstance().aw_writeRow([self.dbm, startTime, endTime])\n \n self.testStep('信号衰减1db')\n self.dbm -= 1\n \n self.testStep('停止读取nmea信息')\n self.assertSuc(self.lbs.aw_stopReadPort())\n\n self.testStep(\"7000停止播放\")\n self.gss7000.aw_GSS7000EndScenario()\n \n self.testStep('测试结果分析')\n CnrLinearityReport.getInstance().aw_calculateKPI()\n \n def teardown(self):\n super(BF_CNR_linearity_L1L5_0001, self).teardown()\n self.teardownStep(\"7000停止播放\")\n self.gss7000.aw_GSS7000EndScenario()\n \n","sub_path":"script/GnssBaseLine/CNR/BF_CNR_linearity_L1L5_0001.py","file_name":"BF_CNR_linearity_L1L5_0001.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"602598480","text":"from math import ceil\nfrom math import floor\n\nimport numpy as np\nfrom sklearn.cluster import KMeans\n\n\ndef train_test_split(ser, train_size, shuffle=True, random_seed=0):\n \"\"\"\n Splits data array into two shuffled stacks.\n\n Parameters\n ----------\n ser : numpy.array\n shape = (num_series, series_len, num_meters)\n - num_series : Amount of time series.\n - series_len : Length of each time series.\n - num_meters : Meters contained in the array.\n train_size : float\n Proportion of data added to the first stack\n shuffle : bool, default=True\n Shuffle the data before splitting it\n random_seed : int, default=0\n If < 0, data is not shuffled\n\n Returns\n -------\n ser_train : numpy.array\n shape = (num_series * train_size, series_len, num_meters)\n ser_test : numpy.array\n shape = (num_series * test_size, series_len, num_meters)\n Where test_size = 1 - train_size\n\n \"\"\"\n assert 0 < train_size < 1, \"Train size must be in range (0, 1)\"\n\n # We don't want to modify the original series\n ser = ser.copy()\n\n # Compute the number of time series that will be used in training\n num_series = ser.shape[0]\n num_train = ceil(num_series * train_size)\n if num_train == num_series:\n raise ValueError(f\"train_size 
{train_size} returns the 100% of series\")\n\n # Shuffle our time series array\n if shuffle and random_seed > 0:\n np.random.seed(random_seed)\n np.random.shuffle(ser)\n\n # Split the shuffled array into train and tests\n ser_train = ser[:num_train, :, :]\n ser_test = ser[num_train:, :, :]\n\n num_new_series = ser_train.shape[0] + ser_test.shape[0]\n assert num_series == num_new_series, f\"Number of time series after split\" \\\n f\"{num_new_series}\\ndoes not match \" \\\n f\"the number before split {num_series}\"\n\n return ser_train, ser_test\n\n\ndef feature_target_split(ser, meters, main=\"_main\"):\n \"\"\"\n Splits data array into features (X) and targets (Y).\n\n Parameters\n ----------\n ser : numpy.array\n shape = (num_series, series_len, num_meters)\n - num_series : Amount of time series.\n - series_len : Length of each time series.\n - num_meters : Meters contained in the array.\n meters : list\n List of meter names, sorted alphabetically and according to ser order.\n Its length must equal num_meters (see above)\n main : str, default='_main'\n Name of the main meter, the one that is used as feature (X). Must be\n contained in meters list\n\n Returns\n -------\n x : numpy.array\n shape = (num_series, series_len, 1)\n y : numpy.array\n shape = (num_series, series_len, num_meters - 1)\n\n \"\"\"\n assert meters == sorted(set(meters)), \"meters must be a sorted list of \" \\\n \"non-duplicated elements\"\n assert main in meters, f\"'{main}' missing in meters:\\n\" \\\n f\"{(', '.join(meters))}\"\n\n # Locate the position of the main meter\n idx = meters.index(main)\n\n # Split X and Y data\n x = ser[:, :, idx].copy()\n x = np.expand_dims(x, axis=2)\n y = np.delete(ser.copy(), idx, axis=2)\n\n return x, y\n\n\ndef normalize_meters(ser, max_values=None, subtract_mean=False):\n \"\"\"\n Normalize the meters values for the ser data array.\n\n Parameters\n ----------\n ser : numpy.array\n shape = (num_series, series_len, num_meters)\n - num_series : Amount of time series.\n - series_len : Length of each time series.\n - num_meters : Meters contained in the array.\n max_values : numpy.array, default=None\n shape = (num_meters, )\n Maximum value expected for each meter. 
If None is supplied, the array\n is created based on the given ser array.\n subtract_mean : bool, default=False\n If True, subtract the mean of each sequence, to center it around 0.\n\n Returns\n -------\n ser : numpy.array\n Normalized values.\n max_values : numpy.array\n\n \"\"\"\n # We do not want to modify the original series\n ser = ser.copy()\n\n if max_values is not None:\n # Ensure max_values is a numpy array\n max_values = np.array(max_values)\n if len(max_values.flatten()) != ser.shape[2]:\n raise ValueError(f\"Length of max_values array\"\n f\"({len(max_values.flatten())}) must be the \"\n f\"number of meters in the series \"\n f\"({ser.shape[2]})\")\n else:\n max_values = ser.max(axis=1).max(axis=0)\n\n max_values = max_values.reshape((1, 1, ser.shape[2]))\n ser = ser / max_values\n\n # Fill NaNs in case one max value is 0\n ser = np.nan_to_num(ser)\n\n if subtract_mean:\n # Make every sequence have mean 0\n ser_mean = ser.mean(axis=1)\n ser -= np.repeat(ser_mean[:, :, np.newaxis], ser.shape[1], axis=1)\n assert (ser.mean(axis=1).round(3)).sum() == 0, \"Mean of sequences is\" \\\n \"not 0\"\n\n return ser, max_values\n\n\ndef denormalize_meters(ser, max_values):\n \"\"\"\n Denormalizes the values of the ser data array.\n\n Parameters\n ----------\n ser : numpy.array\n shape = (num_series, series_len, num_meters)\n - num_series : Amount of time series.\n - series_len : Length of each time series.\n - num_meters : Meters contained in the array.\n max_values : numpy.array\n shape = (num_meters, )\n Maximum value expected for each meter.\n\n Returns\n -------\n ser : numpy.array\n Denormalized values.\n max_values : numpy.array\n\n \"\"\"\n # We do not want to modify the original series\n ser = ser.copy()\n\n # Ensure max_values is a numpy array\n max_values = np.array(max_values)\n\n if len(max_values.flatten()) != ser.shape[2]:\n raise ValueError(f\"Length of max_values array\"\n f\"({len(max_values.flatten())}) must be the \"\n f\"number of meters in the series \"\n f\"({ser.shape[2]})\")\n\n # Ensure proper dimensions\n max_values = max_values.reshape((1, 1, ser.shape[2]))\n\n ser = ser * max_values\n return ser\n\n\ndef _get_cluster_centroids(ser):\n \"\"\"\n Returns ON and OFF cluster centroids' mean and std\n\n Parameters\n ----------\n ser : numpy.array\n shape = (num_series, series_len, num_meters)\n - num_series : Amount of time series.\n - series_len : Length of each time series.\n - num_meters : Meters contained in the array.\n\n Returns\n -------\n mean : numpy.array\n shape = (num_meters,)\n std : numpy.array\n shape = (num_meters,)\n\n \"\"\"\n # We dont want to modify the original series\n ser = ser.copy()\n\n # Reshape in order to have one dimension per meter\n num_meters = ser.shape[2]\n\n # Initialize mean and std arrays\n mean = np.zeros((num_meters, 2))\n std = np.zeros((num_meters, 2))\n\n for idx in range(num_meters):\n # Take one meter record\n meter = ser[:, :, idx].flatten()\n meter = meter.reshape((len(meter), -1))\n kmeans = KMeans(n_clusters=2).fit(meter)\n\n # The mean of a cluster is the cluster centroid\n mean[idx, :] = kmeans.cluster_centers_.reshape(2)\n\n # Compute the standard deviation of the points in\n # each cluster\n labels = kmeans.labels_\n lab0 = meter[labels == 0]\n lab1 = meter[labels == 1]\n std[idx, 0] = lab0.std()\n std[idx, 1] = lab1.std()\n\n return mean, std\n\n\ndef get_thresholds(ser, use_std=True, return_mean=False):\n \"\"\"\n Returns the estimated thresholds that splits ON and OFF appliances states.\n\n Parameters\n 
----------\n ser : numpy.array\n shape = (num_series, series_len, num_meters)\n - num_series : Amount of time series.\n - series_len : Length of each time series.\n - num_meters : Meters contained in the array.\n use_std : bool, default=True\n Consider the standard deviation of each cluster when computing the\n threshold. If not, the threshold is set in the middle point between\n cluster centroids.\n return_mean : bool, default=False\n If True, return the means as second parameter.\n\n Returns\n -------\n threshold : numpy.array\n shape = (num_meters,)\n mean : numpy.array\n shape = (num_meters,)\n Only returned when return_mean is True (default False)\n\n \"\"\"\n mean, std = _get_cluster_centroids(ser)\n\n # Sigma is a value between 0 and 1\n # sigma = the distance from OFF to ON at which we set the threshold\n if use_std:\n sigma = std[:, 0] / (std.sum(axis=1))\n sigma = np.nan_to_num(sigma)\n else:\n sigma = np.ones(mean.shape[0]) * .5\n\n # Add threshold\n threshold = mean[:, 0] + sigma * (mean[:, 1] - mean[:, 0])\n\n # Compute the new mean of each cluster\n for idx in range(mean.shape[0]):\n # Flatten the series\n meter = ser[:, :, idx].flatten()\n mask_on = meter >= threshold[idx]\n mean[idx, 0] = meter[~mask_on].mean()\n mean[idx, 1] = meter[mask_on].mean()\n\n if return_mean:\n return threshold, np.sort(mean)\n else:\n return threshold\n\n\ndef get_status(ser, thresholds):\n \"\"\"\n\n Parameters\n ----------\n ser : numpy.array\n shape = (num_series, series_len, num_meters)\n - num_series : Amount of time series.\n - series_len : Length of each time series.\n - num_meters : Meters contained in the array.\n thresholds : numpy.array\n shape = (num_meters,)\n\n Returns\n -------\n ser_bin : numpy.array\n shape = (num_series, series_len, num_meters)\n With binary values indicating ON (1) and OFF (0) states.\n \"\"\"\n # We don't want to modify the original series\n ser = ser.copy()\n\n ser_bin = np.zeros(ser.shape)\n num_app = ser.shape[-1]\n\n # Iterate through all the appliances\n for idx in range(num_app):\n if len(ser.shape) == 3:\n mask_on = ser[:, :, idx] > thresholds[idx]\n ser_bin[:, :, idx] = mask_on.astype(int)\n else:\n mask_on = ser[:, idx] > thresholds[idx]\n ser_bin[:, idx] = mask_on.astype(int)\n\n ser_bin = ser_bin.astype(int)\n\n return ser_bin\n\n\ndef preprocessing_pipeline_dict(ser, meters, train_size=.6, validation_size=.2,\n main=\"_main\", shuffle=True, random_seed=0,\n thresholds=None,\n normalize=True):\n \"\"\"\n This function serves as a pipeline for preprocessing. It takes the whole\n array of data, splits it into train-validation-tests, normalize its values\n and computes the binary classification for Y data.\n\n Parameters\n ----------\n ser : numpy.array\n shape = (num_series, series_len, num_meters)\n - num_series : Amount of time series.\n - series_len : Length of each time series.\n - num_meters : Meters contained in the array.\n meters : list\n Names of the meters contained in ser, sorted accordingly\n train_size : float, default=0.6\n Proportion of train data\n validation_size : float, default=0.2\n Proportion of validation data\n main : str, default='_main'\n Name of the main meter, that must be contained in meters list\n shuffle : bool, default=True\n Shuffles the data before splitting it\n random_seed : int, default=0\n thresholds : list, default=None\n If not provided, they are computed.\n normalize : bool, default=True\n Normalize the data. 
Please bear in mind that the thresholds stored\n for binarization depend on whether you have applied normalization\n or not.\n\n Returns\n -------\n dict_prepro : dictionary\n\n \"\"\"\n\n num_series = ser.shape[0]\n if floor(num_series * train_size) <= 0:\n raise ValueError(f\"Train size: {train_size} is too low for the given \"\n f\"amount of series: {num_series}\")\n if floor(num_series * validation_size) <= 0:\n raise ValueError(f\"Validation size: {validation_size} is too low for \"\n f\"the given amount of series: {num_series}\")\n\n # Split data intro train and validation+tests\n ser_train, ser_test = train_test_split(ser, train_size,\n random_seed=random_seed,\n shuffle=shuffle)\n\n # Re-escale validation size. Split remaining data into validation and tests\n validation_size /= (1 - train_size)\n ser_val, ser_test = train_test_split(ser_test, validation_size,\n random_seed=random_seed,\n shuffle=shuffle)\n\n # Split data into X and Y\n x_train, y_train = feature_target_split(ser_train, meters, main=main)\n\n x_val, y_val = feature_target_split(ser_val, meters, main=main)\n\n x_test, y_test = feature_target_split(ser_test, meters)\n\n # Normalize\n if normalize:\n x_train, x_max = normalize_meters(x_train)\n y_train, y_max = normalize_meters(y_train)\n\n x_val, _ = normalize_meters(x_val, max_values=x_max)\n y_val, _ = normalize_meters(y_val, max_values=y_max)\n\n x_test, _ = normalize_meters(x_test, max_values=x_max)\n y_test, _ = normalize_meters(y_test, max_values=y_max)\n else:\n x_max = None\n y_max = None\n\n # Get the binary meter status of each Y series\n if thresholds is None:\n thresholds = get_thresholds(y_train)\n bin_train = get_status(y_train, thresholds)\n bin_val = get_status(y_val, thresholds)\n bin_test = get_status(y_test, thresholds)\n\n # Appliance info\n appliances = meters.copy()\n appliances.remove(\"_main\")\n num_appliances = len(appliances)\n\n # Include al the info into a dictionary\n dict_prepro = {\"train\": {\"x\": x_train,\n \"y\": y_train,\n \"bin\": bin_train},\n \"validation\": {\"x\": x_val,\n \"y\": y_val,\n \"bin\": bin_val},\n \"tests\": {\"x\": x_test,\n \"y\": y_test,\n \"bin\": bin_test},\n \"max_values\": {\"x\": x_max,\n \"y\": y_max},\n \"thresholds\": thresholds,\n \"appliances\": appliances,\n \"num_appliances\": num_appliances}\n\n return dict_prepro\n\n\ndef _get_app_status_by_duration(y, threshold, min_off, min_on):\n \"\"\"\n\n Parameters\n ----------\n y : numpy.array\n shape = (num_series, series_len)\n - num_series : Amount of time series.\n - series_len : Length of each time series.\n threshold : float\n min_off : int\n min_on : int\n\n Returns\n -------\n s : numpy.array\n shape = (num_series, series_len)\n With binary values indicating ON (1) and OFF (0) states.\n \"\"\"\n shape_original = y.shape\n y = y.flatten().copy()\n\n condition = y > threshold\n # Find the indicies of changes in \"condition\"\n d = np.diff(condition)\n idx = d.nonzero()[0]\n\n # We need to start things after the change in \"condition\". 
Therefore,\n # we'll shift the index by 1 to the right.\n idx += 1\n\n if condition[0]:\n # If the start of condition is True prepend a 0\n idx = np.r_[0, idx]\n\n if condition[-1]:\n # If the end of condition is True, append the length of the array\n idx = np.r_[idx, condition.size] # Edit\n\n # Reshape the result into two columns\n idx.shape = (-1, 2)\n on_events = idx[:, 0].copy()\n off_events = idx[:, 1].copy()\n assert len(on_events) == len(off_events)\n\n if len(on_events) > 0:\n off_duration = on_events[1:] - off_events[:-1]\n off_duration = np.insert(off_duration, 0, 1000.)\n on_events = on_events[off_duration > min_off]\n off_events = off_events[np.roll(off_duration, -1) > min_off]\n assert len(on_events) == len(off_events)\n\n on_duration = off_events - on_events\n on_events = on_events[on_duration > min_on]\n off_events = off_events[on_duration > min_on]\n\n s = y.copy()\n s[:] = 0.\n\n for on, off in zip(on_events, off_events):\n s[on:off] = 1.\n\n s = np.reshape(s, shape_original)\n\n return s\n\n\ndef get_status_by_duration(ser, thresholds, min_off, min_on):\n \"\"\"\n\n Parameters\n ----------\n ser : numpy.array\n shape = (num_series, series_len, num_meters)\n - num_series : Amount of time series.\n - series_len : Length of each time series.\n - num_meters : Meters contained in the array.\n thresholds : numpy.array\n shape = (num_meters,)\n min_off : numpy.array\n shape = (num_meters,)\n min_on : numpy.array\n shape = (num_meters,)\n\n Returns\n -------\n ser_bin : numpy.array\n shape = (num_series, series_len, num_meters)\n With binary values indicating ON (1) and OFF (0) states.\n \"\"\"\n num_apps = ser.shape[-1]\n ser_bin = ser.copy()\n\n msg = f\"Length of thresholds ({len(thresholds)})\\n\" \\\n f\"and number of appliances ({num_apps}) doesn't match\\n\"\n assert len(thresholds) == num_apps, msg\n\n msg = f\"Length of thresholds ({len(thresholds)})\\n\" \\\n f\"and min_on ({len(min_on)}) doesn't match\\n\"\n assert len(thresholds) == len(min_on), msg\n\n msg = f\"Length of thresholds ({len(thresholds)})\\n\" \\\n f\"and min_off ({len(min_off)}) doesn't match\\n\"\n assert len(thresholds) == len(min_off), msg\n\n for idx in range(num_apps):\n if ser.ndim == 3:\n y = ser[:, :, idx]\n ser_bin[:, :, idx] = _get_app_status_by_duration(y,\n thresholds[idx],\n min_off[idx],\n min_on[idx])\n elif ser.ndim == 2:\n y = ser[:, idx]\n ser_bin[:, idx] = _get_app_status_by_duration(y,\n thresholds[idx],\n min_off[idx],\n min_on[idx])\n\n return ser_bin\n\n\ndef get_status_means(ser, status):\n \"\"\"\n Get means of both status.\n \"\"\"\n\n means = np.zeros((ser.shape[2], 2))\n\n # Compute the new mean of each cluster\n for idx in range(ser.shape[2]):\n # Flatten the series\n meter = ser[:, :, idx].flatten()\n mask_on = status[:, :, idx].flatten() > 0\n means[idx, 0] = meter[~mask_on].mean()\n means[idx, 1] = meter[mask_on].mean()\n\n return means\n","sub_path":"better_nilm/model/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":18915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"213204373","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Feb 8 06:11:39 2019\r\n\r\n@author: acer pc\r\n\"\"\"\r\nprint(\"Want to create a Prime Number list?\")\r\nprint(\"Okay lets get to it\")\r\nprint(\"input the start point and end point of the range\")\r\na= int( input(\"Start point: \"))\r\nb= int(input(\"End point: \"))\r\nprimes=[]\r\nfor prime_nums in range(a,b):\r\n prime_check = True \r\n 
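   # trial division: testing divisors up to sqrt(prime_nums) is sufficient,\r\n    # since any composite number has a factor no larger than its square root\r\n 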
for num in range (2, (int(prime_nums**0.5) + 1)):\r\n if prime_nums % num ==0:\r\n prime_check = False \r\n break\r\n if prime_check:\r\n primes.append(prime_nums)\r\nprint(primes)","sub_path":"Prime Number Generator.py","file_name":"Prime Number Generator.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"233626835","text":"'''25% overlap python code optimized by david / vida\nCreated on Apr 10, 2018\n\n@author: vxp126\n'''\n\nimport time\nfrom datetime import timedelta\nimport numpy as np # To define array and some basic math on arrays\nimport os # To find and change directories\nimport matplotlib.pyplot as plt # To plot the data\nfrom scipy.signal import hilbert # To do hilbert transform\nfrom subprocess import Popen, PIPE\nimport subprocess\nfrom time import sleep\nimport pydevd\n\n# settings\nen_remote_dbg = 0 # enable remote debugging. Enable debug server first!\nen_echo_fig = 1\n\n# remote debug setup\nserver_ip = '129.22.143.84'\nclient_ip = '129.22.143.39'\nif en_remote_dbg:\n from pydevd_file_utils import setup_client_server_paths\n server_path = '/root/ultrasound_python/'\n # client_path = 'D:\\\\GDrive\\\\WORKSPACES\\\\Eclipse_Python_2018\\\\RemoteSystemsTempFiles\\\\' + \\\n # server_ip + '\\\\root\\\\nmr_pcb20_hdl10_2018\\\\MAIN_nmr_code\\\\' # client\n # path with remote system\n client_path = 'V:\\\\ultrasound_python\\\\' # client path with samba\n PATH_TRANSLATION = [(client_path, server_path)]\n setup_client_server_paths(PATH_TRANSLATION)\n pydevd.settrace(client_ip)\n\n\ntime_sample = 8000 # number of time samples coming from sampling frequency\nchannel = 88 # number of total initial channels including overlapping\n\n'''\na = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])\nb = np.array([10, 20, 50])\n\na = np.reshape(a, (3, 3))\nb = np.reshape(b, (3, 1))\nc = a * b\n\n# test reshaping\na = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,\n 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24])\na = np.reshape(a, (3, 8))\n\nc = np.transpose(a)\nd = np.reshape(c, (6, 4))\ne = np.reshape(d, (8, 3))\nf = np.transpose(e)\n'''\n\nwhile True:\n start_time = time.monotonic()\n\n # DATA FROM SOC\n I = np.zeros(shape=(channel, time_sample)) # define an array to keep\n process = Popen(['../c_exec/de10-standard_test'],\n stdout=PIPE, stderr=PIPE, shell=True)\n stdout, stderr = process.communicate()\n stdchar = stdout.split()\n I = [int(x) for x in stdchar]\n I = np.array(I)\n I = np.reshape(I, (channel, time_sample))\n # DATA FROM SOC\n\n '''\n # DATA FROM TEXTFILE\n I = np.loadtxt('D:/10_11_18_TX_ON_Probe_on_Jello_Tube_with_Water2.txt',\n delimiter=' ', usecols=range(8000)) # in matlab\n # DATA FROM TEXTFILE\n '''\n '''\n # DATA FROM TEXTFILE\n with open(os.devnull, \"w\") as f:\n subprocess.call(['./de10-standard_test'], stdout=f)\n sleep(0.1)\n I = np.loadtxt('databank.txt',\n delimiter=' ', usecols=range(8000)) # in matlab\n # DATA FROM TEXTFILE\n '''\n\n # plot echo fig\n if (en_echo_fig):\n # plot many figures\n plt.figure(1)\n for i in range(1, 8, 1):\n plt.subplot(8, 1, i)\n plt.plot(I[i, :])\n echofig = plt.gcf()\n echofig.show()\n echofig.canvas.draw()\n echofig.clf()\n\n for i in range(0, channel):\n I[i, :] = I[i, :] - np.average(I[i, :])\n\n end_time = time.monotonic()\n print(timedelta(seconds=end_time - start_time))\n\n I = hilbert(I)\n N = 8\n\n I = np.transpose(I)\n I = np.reshape(I, (88000, 8))\n I = np.fft.fft(I, axis=1)\n\n end_time = time.monotonic()\n 
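   # elapsed time so far (acquisition, Hilbert transform, FFT), printed for profiling\n 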
print(timedelta(seconds=end_time - start_time))\n\n window = np.array(\n [0.3, 0.7, 1, 1, 1, 1, 0.7, 0.3]) # define a triangular window\n\n I = I * np.transpose(window)\n I = np.reshape(I, (8000, 88))\n I = np.transpose(I)\n\n #S = np.zeros(shape=(8, time_sample))\n #S_tri = np.zeros(shape=(8, time_sample)).astype(complex)\n I_tri = np.zeros(shape=(channel, time_sample)).astype(complex)\n I_windowed = np.zeros(shape=(65, time_sample)).astype(complex)\n\n I_tri = I\n\n # for j in range(0, 81, 8):\n # S = I[range(j, j + 8), :]\n # S_tri = S * window\n # I_tri[range(j, j + 8), :] = S_tri\n\n #I = np.transpose(I)\n\n end_time = time.monotonic()\n print(timedelta(seconds=end_time - start_time))\n\n # print(I_tri[10:15,:])\n # print(I_tri.shape)\n\n I_windowed[range(0, 2), :] = I_tri[range(0, 2), :]\n I_windowed[range(63, 65), :] = I_tri[range(83, 85), :]\n\n for i in range(4, 81, 8):\n I_windowed[range(int(3 * i / 4), int(3 * i / 4) + 4),\n :] = I_tri[range(i - 1, i + 3), :]\n\n for i in range(8, 81, 8):\n I_windowed[range(int(3 * i / 4) + 1, int(3 * i / 4) + 3),\n :] = I_tri[range(i - 1, i + 1), :] + I_tri[range(i + 1, i + 3), :]\n\n # print(I_windowed[0:5,:])\n # print(I_windowed.shape)\n\n end_time = time.monotonic()\n print(timedelta(seconds=end_time - start_time))\n\n P_magnitude = np.abs(I_windowed)\n P_dimention = P_magnitude.shape\n\n P_scaled = np.zeros(shape=(P_dimention[0], P_dimention[1] // 8))\n\n comp_factor = 10\n # for i in range(0, 64):\n for k in range(0, P_dimention[1] // comp_factor):\n #P_scaled[:, k] = (1 / 10) * (P_magnitude[:, (range(10 * k, 10 * (k + 1)))].sum())\n P_scaled[:, k] = np.mean(\n P_magnitude[:, (range(comp_factor * k, comp_factor * (k + 1)))], axis=1)\n\n # P_scaled = zeros(size(P_magnitude, 1), size(P_magnitude, 2) / 16);\n\n end_time = time.monotonic()\n print(timedelta(seconds=end_time - start_time))\n\n # plt.figure()\n # im = plt.imshow(P_red, cmap = 'gray', interpolation='nearest') # use glumpy for real time display, it is faster\n # im = plt.imshow(P_magnitude[:, :1000], cmap='gray')\n plt.figure(2)\n im = plt.imshow(P_scaled[:, 50:300], cmap='gray')\n\n end_time = time.monotonic()\n print(timedelta(seconds=end_time - start_time))\n print('\\n')\n plt.colorbar(im, orientation='horizontal')\n\n fig = plt.gcf()\n fig.show()\n\n fig.canvas.draw()\n plt.pause(0.01)\n fig.clf()\n","sub_path":"AlexPCB/python/Training_fft2_reorder_AlexPCB.py","file_name":"Training_fft2_reorder_AlexPCB.py","file_ext":"py","file_size_in_byte":5780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"89249335","text":"from flask import Flask, request, abort\nfrom miri import analyze, action\nfrom linebot import (\n LineBotApi, WebhookHandler\n)\nfrom linebot.exceptions import (\n InvalidSignatureError\n)\nfrom linebot.models import *\n\napp = Flask(__name__)\n\n# Channel Access Token\nline_bot_api = LineBotApi('')\n# Channel Secret\nhandler = WebhookHandler('')\n\n\"\"\"\n此層只負責接收與發送訊息與訊息類型\n/callback API\nmessage handler (reply)\n\"\"\"\n\n\n# 監聽所有來自 /callback 的 Post Request\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n # get X-Line-Signature header value\n signature = request.headers['X-Line-Signature']\n # get request body as text\n body = request.get_data(as_text=True)\n app.logger.info(\"Request body: \" + body)\n # handle webhook body\n # 透過接收到的訊息類型來分配到要進入哪個handler\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n abort(400)\n return 'OK'\n\n\n# 
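Handle text messages: pass the text to analyze() and reply with the result\n# 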
處理文字訊息\n@handler.add(MessageEvent, message=TextMessage)\ndef handle_message(event):\n \"\"\"\n :param event:\n :param message: str\n TextSendMessage(text = type(str))\n :return: TextSendMessage\n \"\"\"\n\n message = event.message.text\n response = analyze(message)\n line_bot_api.reply_message(event.reply_token, response)\n\n\n# 處理貼圖訊息\n@handler.add(MessageEvent, message=StickerMessage)\ndef handle_message(event):\n import random\n \"\"\"\n reply sticker, now random the first package of sticker and random sticker's id\n \"\"\"\n\n message = StickerSendMessage(\n package_id='1',\n sticker_id='{}'.format(random.randint(1, 21))\n )\n line_bot_api.reply_message(event.reply_token, message)\n\n\n@handler.add(PostbackEvent)\ndef handle_message(event):\n import json\n \"\"\"\n :param event:\n :param message: str\n TextSendMessage(text = type(str))\n :return: TextSendMessage\n \"\"\"\n\n data = event.postback.data\n data = json.loads(data)\n response = action(data)\n line_bot_api.reply_message(event.reply_token, response)\n\n\nimport os\nif __name__ == \"__main__\":\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"481633374","text":"# -*- coding: utf-8 -*-\nfrom openerp.osv import fields, osv\n\n\nclass estudios(osv.Model):\n _name = 'estudios.estudios'\n _description = 'Modulo de estudios'\n _columns = {\n 'name': fields.text(\"Nombre\")\n }\n\n\nclass arranques_admin(osv.Model):\n _name = 'arranques.admin'\n _inherit = \"mail.thread\"\n _description = 'Tabla para administracion de arranques'\n _columns = {\n 'name': fields.many2one('project.project', 'Clave'),\n 'nombre_corto': fields.related('name', 'nombre_corto',\n string=\"Nombre Corto\", type=\"char\"),\n 'descripcion': fields.text('Descripcion'),\n 'fecha_solicitud': fields.date('Fecha de solicitud'),\n 'fecha_entrega': fields.date('Fecha de entrega'),\n 'fecha_final': fields.date('Fecha final'),\n 'responsable_id': fields.many2one('hr.employee', 'Responsable'),\n 'jefe_id': fields.many2one('hr.employee', 'Jefe Inmediato'),\n 'jefe_celula': fields.many2one('hr.employee', string='Jefe de Célula'),\n 'coordinador': fields.many2one('hr.employee', string='Coordinador'),\n 'tarea_ids': fields.one2many('arranques.tarea', 'relation',\n string=\"Tareas\"),\n 'material_ids': fields.one2many('arranques.material',\n 'relation', string=\"Materiales\"),\n 'personal_ids': fields.one2many('arranques.personal',\n 'relation', string=\"Personal\"),\n 'tarea_estatus': fields.char('Estatus'),\n 'material_product_id': fields.many2one('product.product', 'Material'),\n 'material_cant': fields.char('Cantidad', size=25),\n 'material_estatus': fields.char('Estatus'),\n 'personal_puesto_id': fields.many2one('hr.job', 'Puesto'),\n 'personal_num_personas': fields.char('Numero de personas', size=25),\n 'personal_capacitacion': fields.char('Capacitacion'),\n 'personal_plaza_id': fields.many2one('res.country.state.city', 'Plaza'),\n 'planilla_proyecto': fields.char('Plantilla de proyecto'),\n 'etapa_actual': fields.char('Etapa actual'),\n 'permiso_especial': fields.char('Permidos especial'),\n 'no_conformidad': fields.char('No conformidad'),\n 'comentario': fields.char('Comentario de tarea'),\n 'tareas_precargadas': fields.char('Tareas precargadas'),\n 'asistentes': fields.one2many(\"arranques.admin.asistentes\",\n \"relation\", string=\"Asistentes\"),\n 
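       # start and end timestamps for the kickoff meeting window\n 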
'hora_inicio': fields.datetime(\"Hora de Inicio\"),\n 'hora_fin': fields.datetime(\"Hora de Fin\"),\n }\n\n\nclass jmdasistentes(osv.Model):\n _name = 'arranques.admin.asistentes'\n _columns = {\n 'name': fields.many2one('hr.employee', 'Asistentes'),\n 'relation': fields.many2one(\"arranques.admin\", \"Relacion\"),\n }\n\n\nclass arranques(osv.Model):\n _name = 'project.arranques'\n _description = 'Campos para formularios de Arranques'\n _columns = {\n 'name': fields.many2one('project.project', 'Arranques'),\n 'responsable_id': fields.many2one('hr.employee', 'Responsable'),\n 'planilla_proyecto': fields.char('Plantilla de proyecto'),\n 'fecha_inicio': fields.date('Fecha de inicio'),\n 'fecha_final': fields.date('Fecha de final')\n }\n\n\nclass campo(osv.Model):\n _name = 'project.campo'\n _description = 'Tabla asociada para el menu estudios -> campo'\n _columns = {\n 'name': fields.many2one('project.project', 'Nombre'),\n 'responsable_id': fields.many2one('hr.employee', 'Responsable'),\n 'planilla_proyecto': fields.char('Plantilla de proyecto'),\n 'fecha_inicio': fields.date('Fecha de inicio'),\n 'fecha_final': fields.date('Fecha final'),\n }\n\n\nclass procesos_intermedios(osv.Model):\n _name = 'project.procesosint'\n _description = 'Tabla asociada para el menu estudios->procesos intermedios'\n _columns = {\n 'name': fields.many2one('project.project', 'Nombre'),\n 'responsable_id': fields.many2one('hr.employee', 'Responsable'),\n 'planilla_proyecto': fields.char('Plantilla de proyecto'),\n 'fecha_inicio': fields.date('Fecha de inicio'),\n 'fecha_final': fields.date('Fecha final'),\n }\n\n\nclass procesamiento(osv.Model):\n _name = 'project.procesamiento'\n _description = 'Tabla asociada para el menu estudios -> procesamientos'\n _columns = {\n 'name': fields.many2one('project.project', 'Nombre'),\n 'responsable_id': fields.many2one('hr.employee', 'Responsable'),\n 'planilla_proyecto': fields.char('Plantilla de proyecto'),\n 'fecha_inicio': fields.date('Fecha de inicio'),\n 'fecha_final': fields.date('Fecha final'),\n }\n\n\nclass analisis(osv.Model):\n _name = 'project.analisis'\n _description = 'Tabla asociada para el menu estudios -> analisis'\n _columns = {\n 'name': fields.many2one('project.project', 'Nombre'),\n 'responsable_id': fields.many2one('hr.employee', 'Responsable'),\n 'planilla_proyecto': fields.char('Plantilla de proyecto'),\n 'fecha_inicio': fields.date('Fecha de inicio'),\n 'fecha_final': fields.date('Fecha final'),\n }\n\n\nclass entrega(osv.Model):\n _name = 'project.entrega'\n _description = 'Tabla asociada para el menu estudios -> entrega'\n _columns = {\n 'name': fields.many2one('project.project', 'Nombre'),\n 'responsable_id': fields.many2one('hr.employee', 'Responsable'),\n 'planilla_proyecto': fields.char('Plantilla de proyecto'),\n 'fecha_inicio': fields.date('Fecha de inicio'),\n 'fecha_final': fields.date('Fecha final'),\n }\n\n\nclass tareas(osv.Model):\n _inherit = 'project.task'\n _description = 'Campos para formularios de tareas en diferentes areas'\n _columns = {\n 'areas_tarea': fields.boolean('Areas de la tarea'),\n # Campos para Procesos intermedios\n 'cuota_x_hora': fields.char('Cuota x Hora'),\n 'concepto': fields.char('Concepto'),\n 'cantidad_planificada': fields.char('Cantidad planificada'),\n 'cantidad_real': fields.char('Cantidad real (Terminada)'),\n # Campos para Procesamiento\n 'fase': fields.selection([('previo', 'Previo'),\n ('en_proceso', 'En proceso')], 'Fase')\n }\n\n\n# Tareas del arranque\nclass arranques_tarea(osv.Model):\n _name = 
\"arranques.tarea\"\n _columns = {\n 'name': fields.char(string=\"Tarea\", size=40),\n 'responsable': fields.many2one(\"hr.employee\", string=\"Responsable\"),\n 'date': fields.date(\"Fecha\"),\n 'statuss': fields.selection([('new', 'Nueva'),\n ('working', 'En Proceso'), ('done', 'Realizada'),\n ('canceled', 'Cancelada')], \"Estado\"),\n 'relation': fields.many2one('arranques.admin', string=\"Relation\"),\n }\n\n\n# Materiales de arranques\nclass arranques_materiales(osv.Model):\n _name = \"arranques.material\"\n _columns = {\n 'name': fields.char(\"Material\"),\n 'ammount': fields.integer(\"Cantidad\"),\n 'statuss': fields.selection([('new', 'Nueva'),\n ('working', 'En Proceso'), ('done', 'Realizada'),\n ('canceled', 'Cancelada')], \"Estado\"),\n 'relation': fields.many2one('arranques.admin', string=\"Relation\"),\n }\n\n\n# Personal de arranques\nclass arranques_personal(osv.Model):\n _name = \"arranques.personal\"\n _columns = {\n 'name': fields.char(\"Descripción\"),\n 'job_id': fields.many2one(\"hr.job\", \"Puesto\"),\n 'ammount': fields.integer(\"Cantidad\"),\n 'trainning': fields.many2one(\"event.event\", \"Capacitación\"),\n 'plaza': fields.many2one(\"ea.plaza\", \"Plaza\"),\n 'relation': fields.many2one('arranques.admin', string=\"Relation\"),\n }\n\n\n# agregado FVE\nclass proyectos_todo_task(osv.Model):\n _inherit = 'project.task'\n _columns = {\n 'etapa': fields.selection([('arranques', 'Arranques'),\n ('campo', 'Campo'), ('pi', 'Procesos intermedios'),\n ('procesamiento', 'Procesamientos'), ('analisis', 'Analisis'),\n ('entrega', 'Entrega')], 'Etapa')\n }\n\n\nclass proyectos_todo_issue(osv.Model):\n _inherit = 'project.issue'\n _columns = {\n 'etapa': fields.selection([('arranques', 'Arranques'),\n ('campo', 'Campo'), ('pi', 'Procesos intermedios'),\n ('procesamiento', 'Procesamientos'), ('analisis', 'Analisis'),\n ('entrega', 'Entrega')], 'Etapa')\n }\n","sub_path":"ea/estadistica_aplicada2/estudios/estudios.py","file_name":"estudios.py","file_ext":"py","file_size_in_byte":8324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"303485589","text":"\"\"\"Module for the OdkTable class.\"\"\"\n# from ppp.config import TEMPLATE_ENV\nfrom ppp.config import get_template_env\nfrom ppp.definitions.utils import exclusion\n# from ppp.definitions.error import OdkformError\n\nTEMPLATE_ENV = None\n\n\ndef set_template_env(template):\n \"\"\"Set template env\"\"\"\n global TEMPLATE_ENV\n TEMPLATE_ENV = get_template_env(template)\n\n\nclass OdkTable:\n \"\"\"Class to represent a single ODK table from an XLSForm.\n\n Attributes:\n data (list): List of 1 OdkPrompt header and 1+ OdkPrompt rows.\n header (OdkPrompt): OdkPrompt representing table header.\n contents (list): List of OdkPrompts consisting of table rows.\n in_repeat (bool): Is this table part of a repeat group?\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize table object with empty initial values.\"\"\"\n self.data = []\n self.header = None\n self.row = self.header\n self.contents = None\n self.in_repeat = False\n\n def __repr__(self):\n \"\"\"Print representation of instance.\"\"\"\n return ''\\\n .format(self.data[0].row['name'], self.data)\n\n def add(self, odkprompt):\n \"\"\"Add a row of data from XLSForm.\n\n Args:\n odkprompt (OdkPrompt): ODK table row.\n \"\"\"\n self.data.append(odkprompt)\n\n @staticmethod\n def format_row(prompt, lang, **kwargs):\n \"\"\"Format rows row based on HTML options determined by kwargs.\n\n Args:\n prompt (OdkPrompt): The row.\n lang 
(str): The language.\n **kwargs: Keyword arguments.\n\n Returns:\n dict: Reformatted row.\n \"\"\"\n settings = prompt.html_options(lang=lang, **kwargs)\n table_row = prompt.to_dict(lang=lang, **settings)\n return table_row\n\n def set_header_and_contents(self, lang, **kwargs):\n \"\"\"Set header and contents of table.\n\n Args:\n lang (str): The language.\n **kwargs: Keyword arguments\n \"\"\"\n for i in self.data:\n i.row['in_group'] = True\n i.row = self.format_row(prompt=i, lang=lang, **kwargs)\n self.header = self.data[0]\n self.contents = self.data[1:]\n\n # - De-list labels\n for con in self.contents:\n con.row['label'] = con.row['label'][0] if con.row['label'] else ''\n\n # Temporary noinspection until method is added.\n # noinspection PyUnusedLocal\n @staticmethod\n def to_text():\n \"\"\"Get the text representation of the table.\"\"\"\n # def to_text(self, lang):\n # \"\"\"Get the text representation of the table.\n #\n # Args:\n # lang (str): The language.\n # Returns:\n # str: The text for this table.\n #\n # \"\"\"\n # choices = pmix.utils.d(self.choices, lang)\n #\n # choice_width = max(len(c) for c in self.choices)\n # prompt_width = max(len(p) for p in self.prompts)\n #\n # choice_format = '{:>{}}'.format(choice_width)\n # choice_labels = (choice_format.format(c) for c in self.choices)\n # choice_row = ' '.join((' ' * prompt_width, choice_labels))\n #\n # prompt_format = '{:<{}}'.format(prompt_width)\n # prompt_labels = (prompt_format.format(p) for p in self.prompts)\n #\n # option_labels = []\n # for prompt in self.prompts:\n # if prompt.odktype == 'select_one':\n # char = '*'\n # elif prompt.odktype == 'select_multiple':\n # char = '_'\n # else:\n # m = 'Unexpected type in ODK table: {}'.format(prompt.odktype)\n # raise OdkformError(m)\n # these_choices = (choice_format.format(char) for _\n # in self.choices)\n # these_labels = ' '.join(these_choices)\n # option_labels.append(these_labels)\n #\n # full_prompts = (' '.join(i) for i in zip(prompt_labels,\n # option_labels))\n # body = '\\n'.join(full_prompts)\n # result = '\\n'.join((choice_row, body))\n result = 'ODK TABLE TEXT' # Placeholder\n return result\n\n def to_html(self, lang, **kwargs):\n \"\"\"Convert to html.\n\n Args:\n lang (place): The language.\n highlighting (bool): Displays highlighted sub-sections if True.\n **kwargs: Keyword arguments.\n\n Returns:\n str: A rendered html template.\n \"\"\"\n # - Render header\n self.set_header_and_contents(lang, **kwargs)\n table = list()\n table.append(self.header.row)\n\n # - Render body\n for i in self.contents:\n if exclusion(item=i, settings=kwargs):\n continue\n\n table.append(i.row)\n\n # pylint: disable=no-member\n return TEMPLATE_ENV.get_template('content/table/table.html')\\\n .render(table=table, lang=lang, **kwargs, settings=kwargs)\n","sub_path":"ppp/odktable.py","file_name":"odktable.py","file_ext":"py","file_size_in_byte":4947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"223921289","text":"import numpy as np\nimport math\nimport time\n\n\nclass Optimization:\n def __init__(self, beta_v):\n # user\n self.f_d = 350 * 1e+6\n self.E_d = 1e-28 * self.f_d * self.f_d\n self.p_peak = 0.1\n\n # edge\n self.f_s = 2 * 1e+9\n self.E_s = 1e-28 * self.f_s * self.f_s\n self.q_peak = 1\n self.alpha = 0.2\n self.energy = 10\n\n # system\n self.B = 1e+5\n self.N = self.B * (10 ** (-0.1 * 110 - 3))\n self.beta = beta_v\n self.MaxCost = 100\n\n def local_computing(self, task_m, task_C):\n # 计算本地计算任务的cost\n\n 
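       # local execution: latency = cycles / f_d, energy = cycles * E_d, cost = energy + beta * latency\n        # e.g. 3.5e8 cycles at f_d = 3.5e8 Hz -> 1.0 s latency and ~4.3e-3 J of energy\n 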
time_local = task_m * task_C / self.f_d\n energy_local = task_m * task_C * self.E_d\n cost_local = energy_local + self.beta * time_local\n\n return cost_local, time_local, energy_local\n\n def offloading(self, h_u, task_m, task_C, x, y):\n # 计算offloading任务的cost\n\n energy_offloading = self.N / h_u * task_m * x * (pow(2, 1/self.B/x)-1)\n time_offloading = task_m * x + task_m * task_C / self.f_s + self.alpha * task_m * y\n cost_offloading = energy_offloading + self.beta * time_offloading\n\n return cost_offloading, time_offloading, energy_offloading\n\n def optimal_power(self, h_u, h_d, task_m, task_C):\n # 利用拉格朗日对偶法,得到offloading任务的最优功率分配\n # 最优的对偶变量通过二分法得到\n # print(h_u, h_d, task)\n\n # 用二分法求偏导数的零点\n def bisection_partial(C1, C2, low, high):\n # 定义偏导数\n def partial_derivative(C1, C2, var):\n obj = C1 * pow(2, 1 / self.B / var) * (1 - math.log(2) / self.B / var) - C1 + C2\n return obj\n\n # 如果偏导数恒大于0,那么说明 L_k 恒单调递增, 所以 L_k 的极小值在 low 处取得\n if partial_derivative(C1, C2, low) >= 0:\n # print(\"*** 恒单调递增\")\n return low\n\n # 如果偏导数恒小于0,那么说明 L_k 恒单调递减, 所以 L_k 的极小值在 high 处取得\n if partial_derivative(C1, C2, high) <= 1e-4:\n return high\n\n # 否则,用二分法求出偏导数的零点,即为 L_k 的极小值\n # print('*** 区间端点: ', partial_derivative(C1, C2, low), \"(\", low, \")\", partial_derivative(C1, C2, high),\n # \"(\", high, \")\")\n accuracy = 1e-20\n while high - low > accuracy:\n v = (low + high) / 2.0\n if abs(partial_derivative(C1, C2, v)) < 1e-6:\n # print('*** 循环中退出')\n break\n elif partial_derivative(C1, C2, v) > 0:\n high = v\n else:\n low = v\n # print('*** 最终偏导: ', partial_derivative(C1, C2, v), \"最终零点: \", v)\n return v\n\n def energy_used(dual):\n y = np.zeros(len(task_m))\n for i_dx in range(len(task_m)):\n # print('=== task: ', i_dx)\n y[i_dx] = bisection_partial(dual * W[i_dx], F[i_dx], Y_peak, 1e+20)\n # print('*** 零点: ', y[i_dx])\n return W * y * (np.power(2, 1/self.B/y) - 1), y\n\n # 问题参数\n A = self.N / h_u * task_m\n E = self.beta * task_m\n F = self.alpha * self.beta * task_m\n W = self.alpha * self.N / h_d * task_m\n E_edge = self.energy - sum(task_m * task_C * self.E_s)\n X_peak = 1 / (self.B * math.log2(1 + self.p_peak * h_u / self.N))\n Y_peak = 1 / (self.B * math.log2(1 + self.q_peak * h_d / self.N))\n\n # print(sum(task_m * task_C * self.E_s))\n assert (E_edge > 0)\n\n # 初始化对偶变量的最小和最大值\n LB, UB = 0, 1000\n epsilon = 1e-6\n while UB - LB > epsilon:\n lamda = (LB + UB) / 2.0\n # print('-------', 'lambda: ', lamda, UB - LB, '-------')\n condition, optimal_y = energy_used(lamda)\n # print(sum(condition))\n if sum(condition) > E_edge:\n LB = lamda\n else:\n UB = lamda\n # print('最优解', sum(condition), E_edge, lamda) # 此时的 y 即为最优解\n\n # 最优的 x\n # print('==============================')\n optimal_x = np.zeros(len(task_m))\n for index in range(len(task_m)):\n # print('=== task: ', index)\n optimal_x[index] = bisection_partial(A[index], E[index], X_peak, 1e+20)\n # print('*** 零点: ', optimal_x[index])\n\n return optimal_x, optimal_y\n\n def optimize(self, channels, m):\n # 将参数变回原来的区间\n h_u = channels[0]\n h_d = channels[1]\n task_m = channels[2: len(m)+2]\n task_C = channels[len(m)+2:]\n\n # print(task_m, task_C)\n # print(m)\n\n # local task\n cost_loc, t_loc, E_loc = [0], [0], [0]\n if sum(m) < len(m):\n task_m_loc = task_m[np.flatnonzero(1 - m)]\n task_C_loc = task_C[np.flatnonzero(1 - m)]\n cost_loc, t_loc, E_loc = self.local_computing(task_m_loc, task_C_loc)\n\n # task offloading\n cost_off, t_off, E_off = [0], [0], [0]\n if sum(m) > 0:\n task_m_off = task_m[np.flatnonzero(m)]\n task_C_off = 
task_C[np.flatnonzero(m)]\n optimal_x, optimal_y = self.optimal_power(h_u, h_d, task_m_off, task_C_off)\n # print(optimal_x, optimal_y)\n cost_off, t_off, E_off = self.offloading(h_u, task_m_off, task_C_off, optimal_x, optimal_y)\n\n return sum(cost_loc) + sum(cost_off), sum(t_loc) + sum(t_off), sum(E_loc) + sum(E_off)\n\n def exhaust_search(self, K):\n res, path = [], [0] * K\n\n def core(index, K):\n if index == K:\n res.append(np.array(path[:]))\n return\n for i in range(2):\n path[index] = i\n core(index + 1, K)\n core(0, K)\n return res\n\n\n# if __name__ == '__main__':\n# start_time = time.time()\n#\n# channel_MonteCarlo, K, Distance = 30000, 8, 10\n# MonteCarlo = 1000\n# channel = np.loadtxt(\n# '../data/K{0}_D{1}/InputData_MonteCarlo{2}_K{0}_Distance{1}.txt'.format(str(K), str(Distance), str(channel_MonteCarlo)))\n#\n# # 初始化结果\n# # 穷举搜索\n# decision = np.zeros((MonteCarlo, K))\n# cost, Time, energy = np.zeros(MonteCarlo), np.zeros(MonteCarlo), np.zeros(MonteCarlo)\n#\n# # local computing\n# cost_loc, Time_loc, energy_loc = np.zeros(MonteCarlo), np.zeros(MonteCarlo), np.zeros(MonteCarlo)\n#\n# # offloading\n# cost_off, Time_off, energy_off = np.zeros(MonteCarlo), np.zeros(MonteCarlo), np.zeros(MonteCarlo)\n#\n#\n# optimizer = Optimization(0.1)\n# # m_list = optimizer.exhaust_search(K)\n# # print(K, len(m_list))\n# m_list = [np.zeros(K, dtype='int'), np.ones(K, dtype='int')]\n#\n# for cyc in range(MonteCarlo):\n# t_cost = 1e+100\n# t_decision, t_time, t_energy = None, None, None\n#\n# for m in m_list:\n# _cost, _t, _e = optimizer.optimize(channel[cyc, :], m)\n#\n# if sum(m) == 0:\n# cost_loc[cyc] = _cost\n# Time_loc[cyc] = _t\n# energy_loc[cyc] = _e\n#\n# if sum(m) == K:\n# cost_off[cyc] = _cost\n# Time_off[cyc] = _t\n# energy_off[cyc] = _e\n#\n# if (sum(m) == 0 or sum(m) == K) and cyc % (MonteCarlo//10) == 0:\n# print('*** episode:', cyc, ' decision:', m, ' cost:', _cost, ' time:', _t, ' energy:',\n# _e)\n#\n# if _cost < t_cost:\n# t_cost = _cost\n# t_decision = m\n# t_time = _t\n# t_energy = _e\n#\n# decision[cyc, :] = t_decision\n# cost[cyc] = t_cost\n# Time[cyc] = t_time\n# energy[cyc] = t_energy\n#\n# if cyc % (MonteCarlo//10) == 0:\n# print('*** episode:', cyc/MonteCarlo, ' Optimal:', t_decision, ' cost:', t_cost, ' time:', t_time, ' energy:',\n# t_energy)\n#\n# # np.savetxt('../data/Binary_Exhaust_cost_MonteCarlo{0}_K{1}_Distance{2}.txt'.format(str(MonteCarlo), str(K), str(Distance)), cost)\n# # np.savetxt('../data/Binary_Exhaust_time_MonteCarlo{0}_K{1}_Distance{2}.txt'.format(str(MonteCarlo), str(K), str(Distance)), Time)\n# # np.savetxt('../data/Binary_Exhaust_energy_MonteCarlo{0}_K{1}_Distance{2}.txt'.format(str(MonteCarlo), str(K), str(Distance)), energy)\n# # np.savetxt('../data/Binary_Exhaust_decision_MonteCarlo{0}_K{1}_Distance{2}.txt'.format(str(MonteCarlo), str(K), str(Distance)), decision)\n#\n# np.savetxt('../data/K{1}_D{2}/Binary_local_cost_MonteCarlo{0}_K{1}_Distance{2}.txt'.format(str(MonteCarlo), str(K), str(Distance)), cost_loc)\n# np.savetxt('../data/K{1}_D{2}/Binary_local_time_MonteCarlo{0}_K{1}_Distance{2}.txt'.format(str(MonteCarlo), str(K), str(Distance)), Time_loc)\n# np.savetxt('../data/K{1}_D{2}/Binary_local_energy_MonteCarlo{0}_K{1}_Distance{2}.txt'.format(str(MonteCarlo), str(K), str(Distance)), energy_loc)\n#\n# np.savetxt('../data/K{1}_D{2}/Binary_offloading_cost_MonteCarlo{0}_K{1}_Distance{2}.txt'.format(str(MonteCarlo), str(K), str(Distance)), cost_off)\n# 
np.savetxt('../data/K{1}_D{2}/Binary_offloading_time_MonteCarlo{0}_K{1}_Distance{2}.txt'.format(str(MonteCarlo), str(K), str(Distance)), Time_off)\n# np.savetxt('../data/K{1}_D{2}/Binary_offloading_energy_MonteCarlo{0}_K{1}_Distance{2}.txt'.format(str(MonteCarlo), str(K), str(Distance)), energy_off)\n#\n# total_time = time.time() - start_time\n# print('Total time consumed:%s' % total_time)\n# print('Average time per channel:%s' % (total_time / MonteCarlo))\n\n\n\nif __name__ == '__main__':\n\n begin_time = time.time()\n beta_set = np.linspace(0.54, 0.7, 5)\n MonteCarlo = 10000\n\n # 初始化结果\n # local computing\n cost_loc, Time_loc, energy_loc = np.zeros((MonteCarlo, len(beta_set))), np.zeros(\n (MonteCarlo, len(beta_set))), np.zeros((MonteCarlo, len(beta_set)))\n\n # offloading\n cost_off, Time_off, energy_off = np.zeros((MonteCarlo, len(beta_set))), np.zeros(\n (MonteCarlo, len(beta_set))), np.zeros((MonteCarlo, len(beta_set)))\n\n for idx, beta_t in enumerate(beta_set):\n\n start_time = time.time()\n\n channel_MonteCarlo, K, Distance = 30000, 8, 14\n beta_v = beta_t\n\n print('************** user=%d, distance=%d, channel=%d, MonteCarlo=%d, beta=%f *********************' %\n (K, Distance, channel_MonteCarlo, MonteCarlo, beta_v))\n\n channel = np.loadtxt(\n '../data/K{0}_D{1}/InputData_MonteCarlo{2}_K{0}_Distance{1}.txt'.format(str(K), str(Distance), str(channel_MonteCarlo)))\n\n\n\n\n optimizer = Optimization(beta_v)\n # m_list = optimizer.exhaust_search(K)\n # print(K, len(m_list))\n m_list = [np.zeros(K, dtype='int'), np.ones(K, dtype='int')]\n\n for cyc in range(MonteCarlo):\n t_cost = 1e+100\n t_decision, t_time, t_energy = None, None, None\n\n # print(channel[cyc, :])\n\n for m in m_list:\n _cost, _t, _e = optimizer.optimize(channel[cyc, :], m)\n\n if sum(m) == 0:\n cost_loc[cyc, idx] = _cost\n Time_loc[cyc, idx] = _t\n energy_loc[cyc, idx] = _e\n\n if sum(m) == K:\n cost_off[cyc, idx] = _cost\n Time_off[cyc, idx] = _t\n energy_off[cyc, idx] = _e\n\n if (sum(m) == 0 or sum(m) == K) and (cyc % (MonteCarlo//10) == 0):\n print('*** episode:', cyc, ' decision:', m, ' cost:', _cost, ' time:', _t, ' energy:',\n _e)\n\n if _cost < t_cost:\n t_cost = _cost\n t_decision = m\n t_time = _t\n t_energy = _e\n\n # decision[cyc, :] = t_decision\n # cost[cyc] = t_cost\n # Time[cyc] = t_time\n # energy[cyc] = t_energy\n\n # print('*** episode:', cyc / MonteCarlo, ' Optimal:', t_decision, ' cost:', t_cost, ' time:', t_time,\n # ' energy:', t_energy)\n\n total_time = time.time() - start_time\n print('Total time consumed:%s' % total_time)\n print('Average time per channel:%s' % (total_time / MonteCarlo))\n\n print('Whole loop time consumed:%s h' % ((time.time()-begin_time)/3600))\n\n np.savetxt('../data/beta/Binary_local_beta_VS_cost_MonteCarlo{0}_K{1}_Distance{2}_add.txt'.format(str(MonteCarlo), str(K), str(Distance)), cost_loc)\n np.savetxt('../data/beta/Binary_local_beta_VS_time_MonteCarlo{0}_K{1}_Distance{2}_add.txt'.format(str(MonteCarlo), str(K), str(Distance)), Time_loc)\n np.savetxt('../data/beta/Binary_local_beta_VS_energy_MonteCarlo{0}_K{1}_Distance{2}_add.txt'.format(str(MonteCarlo), str(K), str(Distance)), energy_loc)\n\n np.savetxt('../data/beta/Binary_offloading_beta_VS_cost_MonteCarlo{0}_K{1}_Distance{2}_add.txt'.format(str(MonteCarlo), str(K),str(Distance)), cost_off)\n np.savetxt('../data/beta/Binary_offloading_beta_VS_time_MonteCarlo{0}_K{1}_Distance{2}_add.txt'.format(str(MonteCarlo), str(K), str(Distance)),Time_off)\n 
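   # persist the energy results of the all-offloading policy for this beta sweep\n 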
np.savetxt('../data/beta/Binary_offloading_beta_VS_energy_MonteCarlo{0}_K{1}_Distance{2}_add.txt'.format(str(MonteCarlo),str(K),str(Distance)),energy_off)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# if __name__ == '__main__':\n#\n# MonteCarlo, K = 10, 8\n# cost_loc, cost_off = np.zeros(MonteCarlo), np.zeros(MonteCarlo)\n# t_loc, t_off = np.zeros(MonteCarlo), np.zeros(MonteCarlo)\n# E_loc, E_off = np.zeros(MonteCarlo), np.zeros(MonteCarlo)\n# optimizer = Optimization(0.1)\n# channel = np.loadtxt('../data_new/channel_test.txt')\n# local, offloading = np.zeros(K), np.ones(K)\n#\n# cnt0, cnt1 = 0, 0\n# for cyc in range(MonteCarlo):\n# cost_loc[cyc], t_loc[cyc], E_loc[cyc] = optimizer.optimize(channel[cyc, :], local)\n# cost_off[cyc], t_off[cyc], E_off[cyc] = optimizer.optimize(channel[cyc, :], offloading)\n#\n# if cost_loc[cyc] > cost_off[cyc]:\n# print(1, end='')\n# cnt1 += 1\n# else:\n# print(0, end='')\n# cnt0 += 1\n# print(' local: ', cost_loc[cyc], 'offloading: ', cost_off[cyc])\n#\n# # if cyc % (MonteCarlo//10) == 0:\n# # print('episode: ', cyc/MonteCarlo)\n# print('cnt0: ', cnt0, 'cnt1: ', cnt1)\n\n # np.savetxt('../data_new/LocalComputing_MonteCarlo10000_K2_Distance10.txt', cost_loc)\n # np.savetxt('../data_new/Offloading_MonteCarlo10000_K2_Distance10.txt', cost_off)\n\n # np.savetxt('../data_new/LocalComputing_time_MonteCarlo1000_K10_Distance10.txt', t_loc)\n # np.savetxt('../data_new/Offloading_time_MonteCarlo1000_K10_Distance10.txt', t_off)\n #\n # np.savetxt('../data_new/LocalComputing_energy_MonteCarlo1000_K10_Distance10.txt', E_loc)\n # np.savetxt('../data_new/Offloading_energy_MonteCarlo1000_K10_Distance10.txt', E_off)\n\n\n\n# if __name__ == '__main__':\n# start_time = time.time()\n#\n# MonteCarlo, K = 1000, 8\n# beta_set = np.arange(0.06, 0.18, 0.02)\n# channel = np.loadtxt('../data_new/K8_D10/InputData_MonteCarlo10000_K8_Distance10.txt')\n#\n# cost = np.zeros((MonteCarlo, len(beta_set)))\n# _time, energy = np.zeros((MonteCarlo, len(beta_set))), np.zeros((MonteCarlo, len(beta_set)))\n#\n# for idx in range(len(beta_set)):\n# optimizer = Optimization(beta_set[idx])\n#\n# cnt0, cnt1 = 0, 0\n# for cyc in range(MonteCarlo):\n# tmin, t_time, t_energy = 1e+100, None, None\n# decision = None\n#\n# m_list = [np.zeros(K, dtype=int), np.ones(K, dtype=int)]\n# for m in m_list:\n# tmp, _t, _e = optimizer.optimize(channel[cyc, :], m)\n# if tmp < tmin:\n# tmin = tmp\n# decision = m\n# t_time = _t\n# t_energy = _e\n# cost[cyc, idx] = tmin\n# _time[cyc, idx] = t_time\n# energy[cyc, idx] = t_energy\n#\n# if cyc % (MonteCarlo // 10) == 0:\n# print(cyc, tmin, decision)\n#\n# if sum(decision) == 0:\n# cnt0 += 1\n# else:\n# cnt1 += 1\n#\n# print('***',beta_set[idx], cnt0, cnt1)\n#\n# total_time = time.time() - start_time\n# print('Total time consumed:%s' % total_time)\n# print('Average time per channel:%s' % (total_time / MonteCarlo))\n#\n# np.savetxt('../data_new/Exhaust_beta_VS_time_MonteCarlo1000_K8_Distance10_convex.txt', _time)\n# np.savetxt('../data_new/Exhaust_beta_VS_cost_MonteCarlo1000_K8_Distance10_convex.txt', cost)\n# np.savetxt('../data_new/Exhaust_beta_VS_energy_MonteCarlo1000_K8_Distance10_convex.txt', energy)\n\n\n\n# if __name__ == '__main__':\n# start_time = time.time()\n#\n# MonteCarlo, K = 1000, 8\n# beta_set = np.arange(0.06, 0.18, 0.02)\n# channel = np.loadtxt('../data_new/K8_D10/InputData_MonteCarlo10000_K8_Distance10.txt')\n#\n# cost_loc, time_loc, energy_loc = np.zeros((MonteCarlo, len(beta_set))), np.zeros((MonteCarlo, len(beta_set))), np.zeros((MonteCarlo, 
len(beta_set)))\n# cost_off, time_off, energy_off = np.zeros((MonteCarlo, len(beta_set))), np.zeros((MonteCarlo, len(beta_set))), np.zeros((MonteCarlo, len(beta_set)))\n#\n# for idx in range(len(beta_set)):\n# optimizer = Optimization(beta_set[idx])\n# local, offloading = np.zeros(K), np.ones(K)\n#\n# for cyc in range(MonteCarlo):\n#\n# cost_loc[cyc, idx], time_loc[cyc, idx], energy_loc[cyc, idx] = optimizer.optimize(channel[cyc, :], local)\n# cost_off[cyc, idx], time_off[cyc, idx], energy_off[cyc, idx] = optimizer.optimize(channel[cyc, :], offloading)\n#\n# if cyc % (MonteCarlo // 10) == 0:\n# print(cyc)\n#\n# print('***', beta_set[idx], np.mean(cost_loc[:, idx]), np.mean(cost_off[:, idx]))\n#\n# total_time = time.time() - start_time\n# print('Total time consumed:%s' % total_time)\n# print('Average time per channel:%s' % (total_time / MonteCarlo))\n#\n# np.savetxt('../data_new/LocalComputing_beta_VS_time_MonteCarlo1000_K8_Distance10_convex.txt', time_loc)\n# np.savetxt('../data_new/LocalComputing_beta_VS_cost_MonteCarlo1000_K8_Distance10_convex.txt', cost_loc)\n# np.savetxt('../data_new/LocalComputing_beta_VS_energy_MonteCarlo1000_K8_Distance10_convex.txt', energy_loc)\n#\n# np.savetxt('../data_new/Offloading_beta_VS_time_MonteCarlo1000_K8_Distance10_convex.txt', time_off)\n# np.savetxt('../data_new/Offloading_beta_VS_cost_MonteCarlo1000_K8_Distance10_convex.txt', cost_off)\n# np.savetxt('../data_new/Offloading_beta_VS_energy_MonteCarlo1000_K8_Distance10_convex.txt', energy_off)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"code/convex_beta.py","file_name":"convex_beta.py","file_ext":"py","file_size_in_byte":19047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"252048580","text":"#Importing the Libraries\nimport numpy as np\nfrom flask import Flask, request,render_template\nfrom flask_cors import CORS\nimport os\nfrom sklearn.externals import joblib\nimport pickle\nimport flask\nimport os\nimport newspaper\nfrom newspaper import Article\nimport urllib\nimport nltk\n\n\n\n\n#Loading Flask and assigning the model flask runvariable\napp = Flask(__name__)\nCORS(app)\napp=flask.Flask(__name__,template_folder='templates')\n\n\n\n\n\nwith open('model.pickle', 'rb') as handle:\n\tmodel = pickle.load(handle)\n\n\n\n\n@app.route('/')\ndef home():\n return render_template('main.html')\n\n\n\n#Receiving the input url from the user and using Web Scrapping to extract the news content\n@app.route('/predict',methods=['GET','POST'])\ndef predict():\n res = \"Fake\"\n url =request.get_data(as_text=True)[5:]\n url = urllib.parse.unquote(url)\n article = Article(str(url))\n article.download()\n article.parse()\n article.nlp()\n news = article.summary\n #Passing the news article to the model and returing whether it is Fake or Real\n pred = model.predict([news])\n if pred[0] == 0:\n res = \"Fake\"\n else:\n res = \"Real\"\n return render_template('main.html', prediction_text='Analyzed Result: Proned \"{}\"'.format(res))\n \n\n\n\n\n@app.route('/profile/')\ndef hello(name=None):\n return render_template('profile.html', name=name)\n\n@app.route('/about/')\ndef about():\n return render_template('about.html')\n\n@app.route('/SpuriousTool/')\ndef about1():\n os.system('PLAY.vbs')\n return render_template('about.html')\n\nif __name__ == '__main__':\n port=int(os.environ.get('PORT',5000))\n 
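   # use_reloader=False keeps Flask from spawning a second process and re-loading the pickled model\n 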
app.run(port=port,debug=True,use_reloader=False)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"527261022","text":"import httpretty\nimport random\nimport unittest\nfrom os.path import join, dirname\n\nfrom app import create_app\nfrom app.db import db\n\n\nMOCK_RESPONSES = {\n '200': {\n 'status': 200,\n }\n}\n\n# Directory containing mock response data\nMOCK_RESPONSE_DATA = join(dirname(__file__), 'mock')\n\n\nclass AppTests(unittest.TestCase):\n\n def _mock_response(self, name):\n \"\"\"\n Load a named response.\n \"\"\"\n if name not in MOCK_RESPONSES:\n raise KeyError('Unknown mock: %r' % name)\n\n response = MOCK_RESPONSES[name]\n\n # Get samples of data\n samples = open(join(MOCK_RESPONSE_DATA, '%s.txt' % name)).readlines()\n\n # Load a random response\n response['body'] = random.choice(samples)\n\n return httpretty.Response(**response)\n\n def setUp(self):\n self.app = create_app(config='config/test.py')\n db.create_all(app=self.app)\n\n @httpretty.activate\n def test_get_tweet(self):\n \"\"\"\n Test loading a tweet from the API.\n \"\"\"\n import json\n from app.api import TweetsApi\n\n # Set up mock response\n mock_response = self._mock_response('200')\n httpretty.register_uri(httpretty.GET, self.app.config['API_URL'],\n responses=[mock_response])\n\n # Fetch tweet\n api = TweetsApi(self.app.config['API_URL'])\n tweet_json = api.fetch()\n\n # Reset httpretty\n httpretty.reset()\n\n return self.assertEqual(json.loads(mock_response.body), tweet_json)\n\n def test_seen_increment(self):\n \"\"\"\n Tweet increment should always increment by one.\n \"\"\"\n from app.coke import Tweet\n\n tweet = Tweet()\n tweet.seen()\n\n return self.assertEqual(tweet.seen_count, 1)\n\n def test_index_page(self):\n \"\"\"\n Index page should be returned.\n \"\"\"\n with self.app.test_client() as client:\n res = client.get('/')\n return self.assertEqual(res.status_code, 200)\n","sub_path":"app/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"643197407","text":"# coding: utf-8\n\nimport numpy as np\nfrom Optimization.Optimizer import *\nfrom Utils.utils import dot, sqrt\n\n\nclass RMSProp(Optimizer):\n \"\"\"\n Adaptive gradient descent with delta window\n \"\"\"\n g_b = dict()\n g_w = dict()\n v_b = dict()\n v_w = dict()\n\n def __init__(self, learning_rate, beta2=0.999, epsilon=1e-6):\n Optimizer.__init__(self, learning_rate)\n self.epsilon = epsilon\n self.beta2 = beta2\n\n def reset(self):\n self.g_w = dict()\n self.g_b = dict()\n\n def execution(self, model, mini_batch):\n i = 0\n self.reset()\n for x, y in mini_batch:\n a = model.feed_forward(i, x)\n c = model.cost(a, y)\n model.layers.reverse()\n w_delta = 1 * c\n for layer in model.layers:\n w_delta = self.g(i, layer, w_delta)\n model.layers.reverse()\n i += 1\n for layer in model.layers:\n v_b = self.v_b[layer.name] if layer.name in self.v_b else None\n v_w = self.v_w[layer.name] if layer.name in self.v_b else None\n self.v_b[layer.name] = self.V(v_b, self.g_b[layer.name] / len(mini_batch))\n self.v_w[layer.name] = self.V(v_w, self.g_w[layer.name] / len(mini_batch))\n eta_w = self.eta(self.learning_rate, self.g_w[layer.name], self.v_w[layer.name])\n eta_b = self.eta(self.learning_rate, self.g_b[layer.name], self.v_b[layer.name])\n layer.update_args(- eta_w, - 
eta_b)\n\n def g(self, i, layer, w_delta):\n db = w_delta * layer.activation.prime(layer.zs[i])\n self.g_b[layer.name] = self.g_b[layer.name] + db if layer.name in self.g_b else db\n dw = dot(db, layer.xs[i].transpose())\n self.g_w[layer.name] = self.g_w[layer.name] + dw if layer.name in self.g_w else dw\n return dot(layer.weights.transpose(), db)\n\n def V(self, v, g):\n V_square = g**2\n if v is not None:\n V = self.beta2 * v + (1 - self.beta2) * V_square\n else:\n V = (1 - self.beta2) * V_square\n return V\n\n def eta(self, alpha, g, V):\n V += self.epsilon\n return alpha * g / np.sqrt(V)\n","sub_path":"Optimization/RMSProp.py","file_name":"RMSProp.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"78341188","text":"import unittest\r\nfrom protocols.ipp.wrapper import IPP\r\n\r\n\r\nclass MyTestCase(unittest.TestCase):\r\n\r\n def setUp(self):\r\n self.host = \"192.168.80.78\"\r\n self.ipp = IPP(self.host)\r\n\r\n @staticmethod\r\n def get_printer_attributes():\r\n operation_attributes_tag = {\r\n \"attributes-charset\": \"utf-8\",\r\n \"attributes-natural-language\": \"en-us\",\r\n \"printer-uri\": \"ipp://192.168.80.78/ipp/print\",\r\n \"requesting-user-name\": \"SVA-Automation\",\r\n }\r\n\r\n _get_printer_attributes = {\r\n \"operation-attributes-tag\": operation_attributes_tag,\r\n }\r\n\r\n return _get_printer_attributes\r\n\r\n def test_printer_attributes(self):\r\n first_line, headers, body = self.ipp.send(\r\n op_type=\"Get-Printer-Attributes\", version=1.0, **self.get_printer_attributes()\r\n )\r\n\r\n status_code_actual_value = body['status-code']\r\n status_code_accepted_value = \"successful-ok\"\r\n\r\n self.assertEqual(status_code_actual_value, status_code_accepted_value)\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n","sub_path":"example_1.py","file_name":"example_1.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"124370210","text":"from qautils.gppylib.commands.base import Command\nimport os\nimport tinctest\n\n# ----------------------------------------------------------------------\n\nclass Gpdiff1(Command):\n \"\"\"This is a wrapper for gpdiff.pl.\"\"\"\n def __init__(self, out_file, ans_file, gp_ignore = True, ignore_header = True, match_sub = []):\n cmd_str = 'gpdiff.pl -w -B -I CONTEXT: -I NOTICE: '\n if ignore_header:\n cmd_str += ' -I GP_IGNORE: -gpd_ignore_headers'\n elif gp_ignore:\n cmd_str += ' -I GP_IGNORE:'\n cmd_str += ' -gpd_init %s/global_init_file' % (os.path.abspath( os.path.dirname( __file__ ) ))\n if match_sub:\n cmd_str += ' -gpd_init '\n cmd_str += ' -gpd_init '.join(match_sub)\n cmd_str += ' %s %s' % (out_file, ans_file)\n Command.__init__(self, 'run gpdiff', cmd_str)\n\n @staticmethod\n def are_files_equal(out_file, ans_file, gp_ignore = True, ignore_header = True, match_sub = []):\n \"\"\"Return True/False after comparing out_file and ans_file.\"\"\"\n diff_file = out_file.replace('.out','.diff')\n cmd = Gpdiff1(out_file, ans_file, gp_ignore, ignore_header, match_sub)\n tinctest.logger.info(\"Running gpdiff command - %s\" %cmd)\n cmd.run()\n result = cmd.get_results()\n if result.rc != 0:\n tinctest.logger.error(\"gpdiff failed. 
Look into the diff file %s for more information\" %diff_file)\n with open(diff_file, 'w') as dfile:\n dfile.write(result.stdout)\n return False\n return True\n\n\n\n\n","sub_path":"src/template/gpdiff/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"620406084","text":"\"\"\"\n爬虫相关包的使用\n\"\"\"\nimport re\nfrom bs4 import BeautifulSoup\n\nhtml_doc = \"\"\"\nThe Dormouse's story\n\n
<body>\n<p class=\"title\"><b>The Dormouse's story</b></p>\n\n<p class=\"story\">Once upon a time there were three little sisters; and their names were\n<a href=\"http://example.com/elsie\" class=\"sister\" id=\"link1\">Elsie</a>,\n<a href=\"http://example.com/lacie\" class=\"sister\" id=\"link2\">Lacie</a> and\n<a href=\"http://example.com/tillie\" class=\"sister\" id=\"link3\">Tillie</a>;\nand they lived at the bottom of a well.</p>\n\n<p class=\"story\">...</p>
\n\"\"\"\n\nif __name__ == '__main__':\n # 创建一个BeautifulSoup解析对象\n soup = BeautifulSoup(html_doc, \"html.parser\")\n # 获取所有a标签\n links = soup.find_all('a')\n print('所有链接')\n for link in links:\n print(link.name, link['href'], link.get_text())\n\n print('获取所有的链接')\n link_node = soup.find('a', href='http://example.com/elsie')\n print(link_node.name, link_node['href'], link_node['class'], link_node.get_text())\n\n print('正则表达式匹配')\n link_node = soup.find('a', href=re.compile(r'ti'))\n print(link_node.name, link_node['href'], link_node['class'], link_node.text)","sub_path":"MyPython/spider/spider_study.py","file_name":"spider_study.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"312054790","text":"from bluetooth import *\nimport argparse\n\nparser = argparse.ArgumentParser(description='Process some integers.')\nparser.add_argument('--msg', type=str, help='message to send by bluetooth')\nargs = parser.parse_args()\n\nBLUETOOTH_PAIR_KEY = \"98:D3:71:F9:B8:8B\" # LED\n\nif __name__ == '__main__':\n socket = BluetoothSocket( RFCOMM )\n socket.connect((BLUETOOTH_PAIR_KEY, 1))\n print(\"[{0}] LED Control Commend: {1}\".format(BLUETOOTH_PAIR_KEY, args.msg))\n socket.send(args.msg)\n socket.close()","sub_path":"IOTOPUS/webapp/sendLEDCommend.py","file_name":"sendLEDCommend.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"201606452","text":"from keras.models import Sequential\nfrom keras.models import load_model\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\n\nimport numpy as np\nimport random\nfrom collections import deque\n\n\nclass Agent:\n def __init__(self, n_features, use_exploration=False, name_model=\"\", random_action_min=0.1,\n random_action_decay=0.999995, n_neurons=64, future_reward_importance=0.95):\n self.memory = deque(maxlen=100000)\n self.model_name = name_model\n self.use_exploration = use_exploration\n self.actions = ['hold', 'buy', 'sell']\n self.action_size = len(self.actions)\n self.gamma = future_reward_importance # discount rate, determines the importance of future rewards.\n # gamma=0 agent learns to consider current rewards, =1 strives for a long-term high reward\n self.epsilon = 1.0 # exploration rate\n self.epsilon_min = random_action_min # we want the agent to explore at least this amount.\n self.epsilon_decay = random_action_decay # we want to decrease the number of explorations as it gets good\n self.num_trains = 0\n self.num_neurons = n_neurons\n self.num_features = n_features # normalized previous days\n self.model = self._nn_old(name_model) if name_model != '' else self._nn_new(n_features, n_neurons)\n self.model.summary();\n\n def _nn_old(self, name_model=''):\n model = load_model(\"files/output/\" + name_model);\n return model;\n\n def _nn_new(self, n_features, n_neurons):\n model = Sequential()\n model.add(Dense(units=np.maximum(int(n_neurons/ 1), 1), activation=\"relu\", input_dim=n_features))\n model.add(Dense(units=np.maximum(int(n_neurons/ 2), 1), activation=\"relu\"))\n model.add(Dense(units=np.maximum(int(n_neurons/ 8), 1), activation=\"relu\"))\n model.add(Dense(units=self.action_size, activation=\"linear\"))\n model.compile(loss=\"mse\", optimizer=Adam(lr=0.001))\n return model\n\n def remember(self, state, action, reward, next_state, done):\n self.memory.append((state, action, reward, next_state, done))\n\n # best action is a tradeoff bw 
predicting based on past(exploitation) and by exploration randomly:\n # letting the model predict the action of current state based on the data you trained\n def choose_best_action(self, state):\n\n # exploring from time to time\n if self.use_exploration == True:\n prob_exploit = np.random.rand()\n if prob_exploit < self.epsilon:\n random_action = random.randrange(self.action_size)\n return random_action\n # exploiting = predicting\n pred = self.model.predict(state)\n best_action = np.argmax(pred[0])\n # print(f'best_action found by predicting={self.actions[best_action]}')\n return best_action\n\n # fit model based on data x,y: y=reward, x=state, action\n # This training process makes the neural net to predict the action to do based on specific state.\n # using experience replay memory.\n def experience_replay(self, batch_size):\n memory_batch = self.prepare_mem_batch(batch_size)\n\n for curr_state, action, reward, next_state, done in memory_batch:\n #print(f'curr_state={curr_state}, next_state={next_state}, reward={reward}, action ={action}')\n if not done:\n # predict the future discounted reward\n reward_pred = self.model.predict(next_state) # [0, 0, 0.0029] target=0.0036\n # maximum future reward for this state and action (s,a) is the immediate reward r plus maximum future reward for the next state\n target = reward + self.gamma * np.amax(reward_pred[0])\n # the bellman equation for discounted future rewards. https://www.youtube.com/watch?v=8vBXARV_ufk\n else:\n target = reward\n # make the agent to approximately map the current state to future discounted reward - y_f\n y_f = self.model.predict(curr_state)\n y_f[0][action] = target # only chosen action value will change\n self.model.fit(curr_state, y_f, epochs=1, verbose=0)\n self.num_trains += 1\n\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n # print(f'epsilon={self.epsilon}')\n\n # increases learning speed with mini-batches\n def prepare_mem_batch(self, mini_batch_size):\n mini_batch = []\n # mini_batch = random.sample(self.memory, batch_size)#sample is not a good choice in timeseries data\n l = len(self.memory)\n for i in range(l - mini_batch_size, l):\n mini_batch.append(self.memory[i])\n return mini_batch\n","sub_path":"ai_agent.py","file_name":"ai_agent.py","file_ext":"py","file_size_in_byte":4737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"351670111","text":"import os\r\n\r\nimport cv2\r\nimport imutils\r\nimport numpy as np\r\nimport pytesseract\r\nimport matplotlib.pyplot as plt\r\n\r\npytesseract.pytesseract.tesseract_cmd = 'Tesseract-OCR\\\\tesseract.exe'\r\nstates = {\"A\": \"Perak\", \"B\": \"Selangor\", \"C\": \"Pahang\", \"D\": \"Kelantan\", \"F\": \"Putrajaya\", \"J\": \"Johor\", \"K\": \"Kedah\",\r\n \"M\": \"Malacca\", \"N\": \"Negeri Sembilan\", \"P\": \"Penang\", \"R\": \"Perlis\", \"T\": \"Terengganu\", \"V\": \"Kuala Lumpur\",\r\n \"W\": \"Kuala Lumpur\", \"S\": \"Sabah\", \"L\": \"Labuan\", \"Q\": \"Sarawak\", \"Z\": \"Military\", \"U\": \"University\"}\r\n\r\n# Read the image file\r\nimg = cv2.imread('Dataset\\T21.jpg', cv2.IMREAD_COLOR)\r\n\r\n# Resize the image file\r\nimg = cv2.resize(img, (600, 400))\r\ncv2.imshow('original',img)\r\n\r\n# Convert to Grayscale Image\r\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n# Removing Noise from the detected image, before sending to Tesseract\r\ngray = cv2.bilateralFilter(gray, 13, 15, 15)\r\n\r\n# Canny Edge Detection\r\nedged = cv2.Canny(gray, 30, 200)\r\n# Find contours 
based on Edges\r\ncontours = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\ncontours = imutils.grab_contours(contours)\r\ncontours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]\r\nscreenCnt = None\r\n\r\nfor c in contours:\r\n\r\n    peri = cv2.arcLength(c, True)\r\n    approx = cv2.approxPolyDP(c, 0.018 * peri, True)\r\n    # see whether it is a Rect\r\n    if len(approx) == 4:\r\n        screenCnt = approx\r\n        break\r\n\r\nif screenCnt is None:\r\n    # nothing rectangular was found; everything below needs a plate contour, so stop here\r\n    print(\"No contour detected\")\r\n    raise SystemExit\r\n\r\ncv2.drawContours(img, [screenCnt], -1, (0, 0, 255), 3)\r\n\r\nmask = np.zeros(gray.shape, np.uint8)\r\nnew_image = cv2.drawContours(mask, [screenCnt], 0, 255, -1, )\r\nnew_image = cv2.bitwise_and(img, img, mask=mask)\r\n\r\n(x, y) = np.where(mask == 255)\r\n(topx, topy) = (np.min(x), np.min(y))\r\n(bottomx, bottomy) = (np.max(x), np.max(y))\r\nCropped = gray[topx:bottomx + 1, topy:bottomy + 1]\r\n\r\ntext = pytesseract.image_to_string(Cropped, config='--psm 11')\r\nstat = text[0:1]\r\ntry:\r\n    print(\"This car is from:\", states[stat])\r\nexcept KeyError:\r\n    print(\"Could not detect which state this car is from\")\r\nprint(\"\\nprogramming_fever's License Plate Recognition\\n\")\r\nprint(\"Detected license plate Number is:\", text)\r\nimg = cv2.resize(img, (500, 300))\r\nCropped = cv2.resize(Cropped, (400, 200))\r\n\r\n\r\ncv2.imshow('1. Car Image Resize', img)\r\ncv2.imshow('2. Blur', gray)\r\ncv2.imshow('3. edged', edged)\r\ncv2.imshow('4. mask', new_image)\r\ncv2.imshow('5. Cropped', Cropped)\r\n\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n","sub_path":"PlateRecognation.py","file_name":"PlateRecognation.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"640031098","text":"# diagrams as code via https://diagrams.mingrammer.com\nfrom diagrams import Diagram, Cluster, Edge, Node\nfrom diagrams.custom import Custom\nfrom diagrams.aws.general import General\nfrom diagrams.aws.management import Cloudtrail\nfrom diagrams.aws.storage import S3, SimpleStorageServiceS3Bucket\nfrom diagrams.aws.integration import SNS\nfrom diagrams.aws.integration import SQS\nfrom diagrams.aws.compute import ECS, ElasticContainerServiceService, ECR\nfrom diagrams.aws.security import IAMRole,IAM\nfrom diagrams.aws.management import Cloudwatch\nfrom diagrams.aws.devtools import Codebuild\nfrom diagrams.aws.management import SystemsManager\n\n\ndiagram_attr = {\n    \"pad\":\"1.25\"\n}\n\n\nrole_attr = {\n    \"imagescale\":\"true\",\n    \"width\":\"2\",\n    \"fontsize\":\"13\",\n}\n\n\ncolor_event=\"firebrick\"\ncolor_scanning = \"dark-green\"\ncolor_permission=\"steelblue3\"\ncolor_non_important=\"gray\"\ncolor_sysdig=\"lightblue\"\n\nwith Diagram(\"Sysdig Secure for Cloud{}(single-account-ecs)\".format(\"\\n\"), graph_attr=diagram_attr, filename=\"diagram-single\", show=True):\n\n    public_registries = Custom(\"Public Registries\",\"../../resources/diag-registry-icon.png\")\n\n\n    with Cluster(\"AWS single-account-ecs\"):\n\n        master_credentials = IAM(\"credentials \\npermissions: cloudtrail, role creation,...\", fontsize=\"10\")\n\n        with Cluster(\"other resources\", graph_attr={\"bgcolor\":\"lightblue\"}):\n            account_resources = [General(\"resource-1..n\")]\n            ecr = ECR(\"container-registry\\n*sends events on image push to cloudtrail\\n*within any account\")\n\n        with Cluster(\"ecs-cluster\"):\n            ecs_services = ElasticContainerServiceService(\"other services\\n*sends events 
with image runs to cloudtrail\")\n\n    with Cluster(\"sysdig-secure-for-cloud resources\"):\n\n        # cloudtrail\n        cloudtrail = Cloudtrail(\"cloudtrail\\n* ingest events from all\\norg member accounts+managed\", shape=\"plaintext\")\n#        cloudtrail_legend = (\"for clarity purpose events received from sysdig-secure-for-cloud resources\\nhave been removed from diagram, but will be processed too\")\n#        Node(label=cloudtrail_legend, width=\"5\",shape=\"plaintext\", labelloc=\"t\", fontsize=\"10\")\n\n        cloudtrail_s3 = S3(\"cloudtrail-s3-events\")\n        sns = SNS(\"cloudtrail-sns-events\", comment=\"i'm a graph\")\n        cloudwatch = Cloudwatch(\"cloudwatch\\n(logs and alarms)\")\n\n\n        cloudtrail >> Edge(color=color_event, style=\"dashed\") >> cloudtrail_s3\n        cloudtrail >> Edge(color=color_event, style=\"dashed\") >> sns\n\n        with Cluster(\"ecs-cluster\"):\n            cloud_connector = ElasticContainerServiceService(\"cloud-connector\")\n\n        sqs = SQS(\"cloudtrail-sqs\")\n        sqs << Edge(color=color_event) << cloud_connector\n        cloud_connector >> Edge(color=color_non_important) >> cloudwatch\n\n        # scanning\n        codebuild = Codebuild(\"CodeBuild Project\")\n        cloud_connector >> codebuild\n        codebuild >> Edge(color=color_non_important) >> ecr\n        codebuild >> Edge(color=color_non_important) >> public_registries\n\n\n        # bench-role\n        cloud_bench_role = IAMRole(\"SysdigCloudBench\\n(aws:SecurityAudit policy)\", **role_attr)\n\n        #account_resources >> Edge(color=color_event, style=\"dashed\") >> cloudtrail\n        sns >> Edge(color=color_event, style=\"dashed\") >> sqs\n        (cloudtrail_s3 << Edge(color=color_non_important)) - cloud_connector\n\n    with Cluster(\"AWS account (sysdig)\"):\n        sds = Custom(\"Sysdig Secure\\n*receives cloud-connector and cloud-build results\\n*assumeRole on SysdigCloudBench\", \"../../resources/diag-sysdig-icon.png\")\n        sds_account = General(\"cloud-bench\")\n        sds - Edge(label=\"aws_foundations_bench\\n schedule on rand rand * * *\") >> sds_account\n\n\n    cloud_connector >> Edge(color=color_sysdig) >> sds\n    codebuild >> Edge(color=color_sysdig) >> sds\n    sds_account >> Edge(color=color_permission, fontcolor=color_permission) >> cloud_bench_role\n","sub_path":"examples/single-account-ecs/diagram-single.py","file_name":"diagram-single.py","file_ext":"py","file_size_in_byte":4121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"452596595","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 17 16:17:25 2017\n\n@author: jorgemauricio\n\"\"\"\n\n# libraries\nimport numpy as np\n\n# make a list\narr1 = [1,2,3,4]\n\n# create the numpy array\nmyArr1 = np.array(arr1)\n\n# Print\nmyArr1\n\n# make another list\narr2 = [11,22,33,44]\n\n# make a list of lists\nmyList = [arr1,arr2]\n\n# make a multidimensional array\nmyArr2 = np.array(myList)\n\n# display the array\nmyArr2\n\n# get the shape of the array\nmyArr2.shape\n\n# display the data type of the array \nmyArr2.dtype\n\n# make special arrays \n# zeros\nnp.zeros(5)\n\n# ones\nnp.ones((5,5))\n\n# empty\nnp.empty(5)\nnp.empty((3,4))\n\n# identity array\nnp.eye(5)\n\n# Using a range\nnp.arange(5)\n","sub_path":"ejercicios/ej_1_create_arrays.py","file_name":"ej_1_create_arrays.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"279956451","text":"from __future__ import print_function\nimport numpy as 
np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM, SimpleRNN\nfrom sklearn.metrics import mean_squared_error\n\n\n# Create model\ndef create_rnn_model(length, stateful):\n\t##### YOUR MODEL GOES HERE #####\n\tmodel = Sequential()\n\tmodel.add(SimpleRNN(20, activation='relu', stateful=stateful, batch_input_shape=(1, length, 1)))\n\tmodel.add(Dense(1))\n\t# model.summary()\n\tmodel.compile(loss='mean_squared_error', optimizer='adam')\n\treturn model\n\n\n# split train/test data\ndef split_data(x, y, ratio=0.8):\n\tto_train = int(len(x.index) * ratio)\n\t# tweak to match with batch_size\n\tto_train -= to_train % batch_size\n\n\tx_train = x[:to_train]\n\ty_train = y[:to_train]\n\tx_test = x[to_train:]\n\ty_test = y[to_train:]\n\n\t# tweak to match with batch_size\n\tto_drop = x.shape[0] % batch_size\n\tif to_drop > 0:\n\t\tx_test = x_test[:-1 * to_drop]\n\t\ty_test = y_test[:-1 * to_drop]\n\n\t# some reshaping\n\t##### RESHAPE YOUR DATA BASED ON YOUR MODEL #####\n\tx_train = np.array(x_train)\n\tx_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))\n\n\ty_train = np.array(y_train)\n\n\tx_test = np.array(x_test)\n\tx_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))\n\n\ty_test = np.array(y_test)\n\n\treturn (np.array(x_train), np.array(y_train)), (np.array(x_test), np.array(y_test))\n\n\ndef plot_loss(train_loss, test_loss, length, state):\n\tplt.figure()\n\tplt.title(f\"{state} RNN Model Loss VS Iterations for L = {length}\")\n\tplt.plot(np.arange(1, 11), train_loss, c='black', label='train')\n\tplt.plot(np.arange(1, 11), test_loss, c='red', label='test')\n\tplt.xlabel('iterations')\n\tplt.ylabel('losses')\n\tplt.legend()\n\tplt.grid()\n\tplt.savefig(f\"{path}/Loss-vs-Iteration-RNN-{state}-L{length}.png\")\n\tplt.show()\n\n\n# training parameters passed to \"model.fit(...)\"\nbatch_size = 1\nepochs = 10\n\n# The input sequence min and max length that the model is trained on for each output point\nmin_length = 1\nmax_length = 10\n\n# load data from files\nnoisy_data = np.loadtxt('../filter_data/noisy_data.txt', delimiter='\\t', dtype=np.float)\nsmooth_data = np.loadtxt('../filter_data/smooth_data.txt', delimiter='\\t', dtype=np.float)\npath = \"../result/part2\"\n\nprint('noisy_data shape:{}'.format(noisy_data.shape))\nprint('smooth_data shape:{}'.format(smooth_data.shape))\nprint('noisy_data first 5 data points:{}'.format(noisy_data[:5]))\nprint('smooth_data first 5 data points:{}'.format(smooth_data[:5]))\n\n\n# List to keep track of root mean square error for different length input sequences\nrnn_stateful_rmse_list = list()\nrnn_stateless_rmse_list = list()\n\nfor num_input in range(min_length, max_length+1):\n\tlength = num_input\n\n\tprint(\"*\" * 33)\n\tprint(\"INPUT DIMENSION:{}\".format(length))\n\tprint(\"*\" * 33)\n\n\t# convert numpy arrays to pandas dataframe\n\tdata_input = pd.DataFrame(noisy_data)\n\texpected_output = pd.DataFrame(smooth_data)\n\n\t# when length > 1, arrange input sequences\n\tif length > 1:\n\t\t##### ARRANGE YOUR DATA SEQUENCES #####\n\t\texpected_output = pd.DataFrame(smooth_data[length - 1:])\n\t\ttransformed_input = []\n\t\tfor num in range(0, data_input.shape[0] - length + 1):\n\t\t\tdata_set = np.asarray(noisy_data[num:num + length])\n\t\t\ttransformed_input.append(data_set)\n\t\tdata_input = pd.DataFrame(np.flip(transformed_input, axis=1))\n\n\tprint('data_input length:{}'.format(len(data_input.index)))\n\n\t# Split training and test data: use 
first 80% of data points as training and remaining as test\n\t(x_train, y_train), (x_test, y_test) = split_data(data_input, expected_output)\n\tprint('x_train.shape: ', x_train.shape)\n\tprint('y_train.shape: ', y_train.shape)\n\tprint('x_test.shape: ', x_test.shape)\n\tprint('y_test.shape: ', y_test.shape)\n\n\tprint('Input shape:', data_input.shape)\n\tprint('Output shape:', expected_output.shape)\n\tprint('Input head: ')\n\tprint(data_input.head())\n\tprint('Output head: ')\n\tprint(expected_output.head())\n\tprint('Input tail: ')\n\tprint(data_input.tail())\n\tprint('Output tail: ')\n\tprint(expected_output.tail())\n\t\n\t# Create the stateful model\n\tprint('Creating Stateful Vanilla RNN Model...')\n\tmodel_rnn_stateful = create_rnn_model(length, stateful=True)\n\n\t# Train the model\n\tprint('Training')\n\tstateful_train_loss = []\n\tstateful_test_loss = []\n\tfor i in range(epochs):\n\t\tprint('Epoch', i + 1, '/', epochs)\n\t\t# Note that the last state for sample i in a batch will\n\t\t# be used as initial state for sample i in the next batch.\n\t\t\n\t\t##### TRAIN YOUR MODEL #####\n\t\tstateful_history = model_rnn_stateful.fit(x=x_train, y=y_train, epochs=1, batch_size=batch_size, verbose=2, shuffle=False, validation_data=(x_test, y_test))\n\t\tstateful_train_loss.append(stateful_history.history['loss'][0])\n\t\tstateful_test_loss.append(stateful_history.history['val_loss'][0])\n\n\t\t# reset states at the end of each epoch\n\t\tmodel_rnn_stateful.reset_states()\n\n\n\t# Plot and save loss curves of training and test set vs iteration in the same graph\n\t##### PLOT AND SAVE LOSS CURVES #####\n\tplot_loss(stateful_train_loss, stateful_test_loss, length, 'stateful')\n\n\t# Save your model weights with following convention:\n\t# For example length 1 input sequences model filename\n\t# rnn_stateful_model_weights_length_1.h5\n\t##### SAVE MODEL WEIGHTS #####\n\tfilename = f'rnn_stateful_model_weights_length_{length}.h5'\n\tmodel_rnn_stateful.save_weights(f'{path}/'+filename)  # save under the output path, same as the stateless model below\n\n\t# Predict \n\tprint('Predicting')\n\t##### PREDICT #####\n\tpredicted_rnn_stateful = model_rnn_stateful.predict(x=x_test, batch_size=batch_size)\n\n\t##### CALCULATE RMSE #####\n\trnn_stateful_rmse = np.sqrt(mean_squared_error(predicted_rnn_stateful, y_test))\n\trnn_stateful_rmse_list.append(rnn_stateful_rmse)\n\n\t# print('tsteps:{}'.format(tsteps))\n\tprint('length:{}'.format(length))\n\tprint('Stateful Vanilla RNN RMSE:{}'.format( rnn_stateful_rmse ))\n\n\n\n\t# Create the stateless model\n\tprint('Creating stateless Vanilla RNN Model...')\n\tmodel_rnn_stateless = create_rnn_model(length, stateful=False)\n\n\t# Train the model\n\tprint('Training')\n\t# stateless_train_loss = []\n\t# stateless_test_loss = []\n\t# for i in range(epochs):\n\t# \tprint('Epoch', i + 1, '/', epochs)\n\t# \t# Note that the last state for sample i in a batch will\n\t# \t# be used as initial state for sample i in the next batch.\n\t#\n\t# \t##### TRAIN YOUR MODEL #####\n\t# \tstateless_history = model_rnn_stateless.fit(x=x_train, y=y_train, epochs=1, batch_size=1, verbose=0, shuffle=False)\n\t# \tstateless_loss = model_rnn_stateless.evaluate(x=x_test, y=y_test, batch_size=batch_size)\n\t# \tstateless_train_loss.append(stateless_history.history['loss'][0])\n\t# \tstateless_test_loss.append(stateless_loss)\n\t#\n\t# \t# reset states at the end of each epoch\n\t# \tmodel_rnn_stateless.reset_states()\n\tstateless_history = model_rnn_stateless.fit(x=x_train, y=y_train, epochs=epochs, batch_size=1, verbose=2, shuffle=False, validation_data=(x_test, 
y_test))\n\n\t# Plot and save loss curves of training and test set vs iteration in the same graph\n\t##### PLOT AND SAVE LOSS CURVES #####\n\t# plot_loss(stateless_train_loss, stateless_test_loss, length, 'stateless')\n\tplot_loss(stateless_history.history['loss'], stateless_history.history['val_loss'], length, 'stateless')\n\n\t# Save your model weights with following convention:\n\t# For example length 1 input sequences model filename\n\t# rnn_stateless_model_weights_length_1.h5\n\t##### SAVE MODEL WEIGHTS #####\n\tfilename = f'rnn_stateless_model_weights_length_{length}.h5'\n\tmodel_rnn_stateless.save_weights(f'{path}/'+filename)\n\n\t# Predict \n\tprint('Predicting')\n\t##### PREDICT #####\n\tpredicted_rnn_stateless = model_rnn_stateless.predict(x=x_test, batch_size=batch_size)\n\n\t##### CALCULATE RMSE #####\n\trnn_stateless_rmse = np.sqrt(mean_squared_error(predicted_rnn_stateless, y_test))\n\trnn_stateless_rmse_list.append(rnn_stateless_rmse)\n\n\t# print('tsteps:{}'.format(tsteps))\n\tprint('length:{}'.format(length))\n\tprint('Stateless Vanilla RNN RMSE:{}'.format( rnn_stateless_rmse ))\n\n\n# save your rmse values for different length input sequence models - stateful rnn:\nfilename = 'rnn_stateful_model_rmse_values.txt'\nnp.savetxt(f'{path}/'+filename, np.array(rnn_stateful_rmse_list), fmt='%.6f', delimiter='\\t')\n\n# save your rmse values for different length input sequence models - stateless rnn:\nfilename = 'rnn_stateless_model_rmse_values.txt'\nnp.savetxt(f'{path}/'+filename, np.array(rnn_stateless_rmse_list), fmt='%.6f', delimiter='\\t')\n\nprint(\"#\" * 33)\nprint('Plotting Results')\nprint(\"#\" * 33)\n\n# Plot and save rmse vs Input Length\nplt.figure()\nplt.plot( np.arange(min_length,max_length+1), rnn_stateful_rmse_list, c='blue', label='Stateful RNN')\nplt.plot( np.arange(min_length,max_length+1), rnn_stateless_rmse_list, c='cyan', label='Stateless RNN')\nplt.title('RMSE vs Input Length in Test Set for RNN Model')\nplt.xlabel('length of input sequences')\nplt.ylabel('rmse')\nplt.legend()\nplt.grid()\nplt.savefig(f\"{path}/RMSE-vs-Input_Length-RNN.png\")\nplt.show()\n\n\n","sub_path":"part2/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":8838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"308606397","text":"# -*- coding: utf-8 -*-\nfrom atom.ext.crispy_forms.forms import SingleButtonMixin\nfrom braces.forms import UserKwargModelFormMixin\nfrom django import forms\nfrom django.utils.translation import ugettext as _\n\nfrom .models import Question, Questionary\nfrom .utils import get_modulators\n\n\nclass QuestionaryForm(SingleButtonMixin, UserKwargModelFormMixin, forms.ModelForm):\n def __init__(self, *args, **kwargs):\n self.monitoring = kwargs.pop('monitoring', None)\n super(QuestionaryForm, self).__init__(*args, **kwargs)\n if not self.user.is_superuser:\n del self.fields['lock']\n\n def save(self, *args, **kwargs):\n if self.monitoring:\n self.instance.monitoring = self.monitoring\n return super(QuestionaryForm, self).save(*args, **kwargs)\n\n class Meta:\n model = Questionary\n fields = ['title', 'lock']\n\n\nclass QuestionForm(SingleButtonMixin, UserKwargModelFormMixin, forms.ModelForm):\n def __init__(self, *args, **kwargs):\n questionary = kwargs.pop('questionary')\n super(QuestionForm, self).__init__(*args, **kwargs)\n choices = [(key, mod.description) for key, mod in get_modulators().items()]\n self.fields['genre'] = forms.ChoiceField(choices=choices, label=_(\"Genre\"))\n 
self.instance.questionary = questionary\n\n class Meta:\n model = Question\n fields = ['position', 'genre']\n\n\nclass QuestionDefinitionForm(SingleButtonMixin, UserKwargModelFormMixin, forms.Form):\n def __init__(self, *args, **kwargs):\n self.instance = kwargs.pop('instance')\n kwargs['initial'] = self.instance.definition\n super(QuestionDefinitionForm, self).__init__(*args, **kwargs)\n self.construct_form()\n\n def construct_form(self):\n for name, field in self.instance.modulator.list_create_question_fields():\n self.fields[name] = field\n\n def save(self):\n self.instance.definition = self.cleaned_data\n self.instance.save()\n return self.instance\n","sub_path":"feder/questionaries/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"88514615","text":"\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time\n\n#change my name to your username\ndriver_path = \"/Users/giannaaprile/RUClassroom/chromedriver\"\nbrowser = webdriver.Chrome(driver_path)\nf = open(\"fall2019.txt\", \"w+\")\n\ndef moveInView(x):\n browser.execute_script(\"arguments[0].scrollIntoView(true);\",x)\n\nbrowser.get(\"https://sis.rutgers.edu/soc/#subjects?semester=12019&campus=NB&level=U\")\nbrowser.implicitly_wait(5)\n\ndropButton = browser.find_element_by_xpath(\"\"\"//*[@id=\"widget_dijit_form_FilteringSelect_0\"]/div[1]/input\"\"\")\ndropButton.click()\n\ndropMenu = browser.find_element_by_xpath(\"\"\"//*[@id=\"dijit_form_FilteringSelect_0_popup\"]\"\"\")\n\ndepartments = [x for x in dropMenu.find_elements_by_css_selector(\".dijitReset.dijitMenuItem\")]\n\n# Iterate through each department \ncount = 0\n\nwhile count < len(departments):\n\n time.sleep(1)\n departments = [x for x in dropMenu.find_elements_by_css_selector(\".dijitReset.dijitMenuItem\")]\n moveInView(departments[count])\n departments[count].click()\n time.sleep(5)\n\n courseList = [x for x in browser.find_elements_by_css_selector(\".courseExpandIcon\")]\n\n # Iterate through each class in the department\n classCount = 0\n while classCount < len(courseList):\n # This can be shortened depending on wifi strength\n time.sleep(1)\n courseList = [x for x in browser.find_elements_by_css_selector(\".courseExpandIcon\")]\n moveInView(courseList[classCount])\n courseList[classCount].find_element(By.TAG_NAME, 'img').click()\n \n # Gets each sections information\n courseData = [x for x in browser.find_elements_by_css_selector(\".courseData\")]\n sectionInfoList = [x for x in courseData[classCount].find_elements_by_css_selector(\".sectionData\")]\n \n # Go through each section\n sectionCount = 0\n while sectionCount < len(sectionInfoList):\n\n time.sleep(1)\n courseData = [x for x in browser.find_elements_by_css_selector(\".courseData\")]\n sectionInfoList = [x for x in courseData[classCount].find_elements_by_css_selector(\".sectionData\")]\n moveInView(sectionInfoList[sectionCount])\n\n classesPerWeek = len([x for x in sectionInfoList[sectionCount].find_elements_by_css_selector(\".meetingTimeDay\")])\n\n \n \n # Print hours for class\n meetingCount = 0\n while meetingCount < classesPerWeek:\n\n courseData = [x for x in browser.find_elements_by_css_selector(\".courseData\")]\n sectionInfoList = [x for x in 
courseData[classCount].find_elements_by_css_selector(\".sectionData\")]\n            \n                dayList = [x for x in sectionInfoList[sectionCount].find_elements_by_css_selector(\".meetingTimeDay\")]\n                hourList = [x for x in sectionInfoList[sectionCount].find_elements_by_css_selector(\".meetingTimeHours\")]\n                campusList = [x for x in sectionInfoList[sectionCount].find_elements_by_css_selector(\".meetingTimeCampus\")]\n                buildingList = [x for x in sectionInfoList[sectionCount].find_elements_by_css_selector(\".meetingTimeBuildingAndRoom\")]\n\n                print(dayList[meetingCount].text, hourList[meetingCount].text, campusList[meetingCount].text, buildingList[meetingCount].text)\n                newEntry = dayList[meetingCount].text + ' ' + hourList[meetingCount].text + ' ' + campusList[meetingCount].text + ' ' + buildingList[meetingCount].text + \"\\n\"\n                f.write(newEntry)\n\n                meetingCount = meetingCount+1\n            sectionCount = sectionCount+1\n\n        # Close drop down to prevent loop\n        moveInView(courseList[classCount])\n        courseList[classCount].find_element(By.TAG_NAME, 'img').click()\n        classCount = classCount+1\n\n    dropButton.click()\n    count = count + 1\n\n","sub_path":"courseScraper.py","file_name":"courseScraper.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"41348676","text":"'''\nGiven an array nums of n integers, are there elements a, b, c in nums such that a + b + c = 0?\nFind all unique triplets in the array which gives the sum of zero.\n'''\n\n\nclass Solution(object):\n    def threeSum(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[List[int]]\n        \"\"\"\n        res = []\n        n = len(nums)\n        nums = sorted(nums)\n        for i in range(n-2):\n            if i > 0 and nums[i] == nums[i-1]:\n                continue\n            j = i+1\n            k = n-1\n            new_target = -nums[i]\n            while j < k:\n                summ = nums[j] + nums[k]\n                if summ < new_target:\n                    j += 1\n                elif summ > new_target:\n                    k -= 1\n                else:\n                    res.append([nums[i], nums[j], nums[k]])\n                    while j < k and nums[j+1] == nums[j]:\n                        j += 1\n                    j += 1\n                    while k > j and nums[k-1] == nums[k]:\n                        k -= 1\n                    k -= 1\n        return res \n\ntest = Solution()\nprint(test.threeSum([-1,0,1,0]))\n\n","sub_path":"3Sum.py","file_name":"3Sum.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"150242761","text":"import random\r\nimport time\r\n\r\n\r\nprint('PM CORPORATION PRESENTS:...')\r\ntime.sleep(1) \r\nprint('CIV. WARS!!')\r\ntime.sleep(1)\r\nprint('civWars version 1.2')\r\ntime.sleep(1)\r\n#print('NEW CHANGES: better fighting ')\r\n#time.sleep(1)\r\ndef civwar():\r\n\r\n    myArmy = 15\r\n    invader = 15\r\n\t\r\n    time.sleep(1)\r\n    print('Choose a civilisation :')\r\n    you = input('''Greece\r\nAmerica\r\nKorea\r\nEgypt\r\n(default is America)\r\n''').strip().lower()  # normalize case so typed choices match regardless of capitalization\r\n    if you == \"greece\":\r\n        print('You are Greek, and your leader is King Leonidus.')\r\n        civ = \"Greece\"\r\n        leader = \"Leonidus\"\r\n    elif you == \"korea\":\r\n        print('You are Korean, and your leader is Admiral Yi Sun Shin.')\r\n        civ = \"Korea\"\r\n        leader = \"Yi Sun Shin\"\r\n    elif you == \"america\":\r\n        print('You are American, and your leader is President George Washington.')\r\n        civ = \"America\"\r\n        leader = \"George Washington\"\r\n    elif you == \"egypt\":\r\n        print(\"You are Egyptian, and your leader is Queen Cleopatra.\")\r\n        civ = \"Egypt\"\r\n        leader = \"Cleopatra\"\r\n    else:\r\n        print('By default, you are American. ')\r\n        civ = \"America\"\r\n        leader = \"George Washington\"\r\n\r\n    attacker = random.randint(1,9)\r\n    if attacker < 4:\r\n        print('The invaders are the Aztec.')\r\n        enciv = \"Aztec\"\r\n        enleader = \"Zizizangabulubinga\"\r\n    elif attacker < 6:\r\n        print('The invaders are the Persians.')\r\n        enciv = \"Persia\"\r\n        enleader = \"King Xerxes\"\r\n    elif attacker < 8:\r\n        print('The invaders are the Mongolians.')\r\n        enciv = \"Mongolia\"\r\n        enleader = \"Genghis Khan\"\r\n    else:\r\n        print('The Japanese are invading.')\r\n        enciv = \"Japan\"\r\n        enleader = \"Toyotomi Hideyoshi\"\r\n\r\n    time.sleep(1)\r\n    print(leader + ', You must defend ' + civ + ' from ' + enleader + ', of ' + enciv)\r\n    time.sleep(1)\r\n\r\n\r\n\r\n    print('You and the enemy start with 15 armies each.')\r\n    time.sleep(1)\r\n\t\r\n    while myArmy > 0:\r\n        time.sleep(1)\r\n\r\n        dice = random.randint(1,10)\r\n        time.sleep(1)\r\n        if dice < 3:\r\n            myArmy = myArmy - 2\r\n            print(enciv, ' has just destroyed 2 armies!!')\r\n        elif dice < 5:\r\n            myArmy = myArmy - 2\r\n            invader = invader - 1\r\n            print(enciv, ' has just destroyed 2 armies!')\r\n            print(civ + ' has destroyed 1 army!')\r\n        elif dice == 7: \r\n            myArmy = myArmy - 2\r\n            invader = invader - 2\r\n            print ('Yikes!! ' + civ + ' and ', enciv, ' have lost 2 armies each!!')\t\t\t\t\r\n        elif dice < 9:\r\n            invader = invader - 2\r\n            myArmy = myArmy -1\r\n            print(civ + ' has destroyed 2 armies!')\r\n            print(enciv, ' has destroyed 1 army!')\t\t\r\n        else:\r\n            invader = invader - 2\r\n            print('Hoorah!! ' + civ + ' has destroyed 2 armies!!')\r\n\t\t\t\t\t\r\n        print(civ + ' has ' ,myArmy , ' armies left.')\r\n        print(enciv, ' has ' ,invader , ' armies left.')\r\n        print('###################################################')\r\n\t\t\r\n\r\n        if myArmy <= 0:\r\n            print('NOOO! ', enciv, ' has conquered ' + civ + '!!')\r\n            break\r\n        elif invader <= 0:\r\n            print('YEESS!! ' + civ + ' has repelled the ', enciv, '!!')\r\n            break\r\n\r\nplayAgain = \"yes\"\r\nwhile playAgain == \"yes\" or playAgain == \"y\":\r\n    \r\n    civwar()\r\n    \r\n    print('Play again if you wish. 
')\r\n print('Type \"yes\" or \"y\" to play again.')\r\n playAgain = input()\r\n\r\n","sub_path":"C2/games/not_working/civwar.py","file_name":"civwar.py","file_ext":"py","file_size_in_byte":4146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"163219087","text":"#!/usr/bin/env phthon3\n# -*- coding: utf-8 -*-\n\nimport requests\nimport os\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlretrieve\n\nclass learning:\n\n\tdef __init__(self):\n\t\tself.url = 'http://www.mzitu.com/all'\n\t\tself.user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'\n\t\tself.headers = { 'User-Agent' : self.user_agent }\n\n\n\tdef initpage(self):\n\t\treturn requests.get(self.url, headers = self.headers)\n\n\n\tdef geturl(self,page):\n\t\tsoup = BeautifulSoup(page.text, 'lxml')\n\t\treturn soup.select(\"p a\")\n\t\t#print(type(links[0]))\n\n\n\n\tdef download(self, imgnum, url):\n\t\tfor num in range(1,imgnum+1):\n\t\t\tpageurl = url + '/' + str(num)\n\t\t\tpage = requests.get(pageurl)\n\t\t\tsoup = BeautifulSoup(page.text, 'lxml')\n\t\t\timgurl = soup.select('p a img')[0]['src']\n\t\t\tdata = requests.get(imgurl)\n\t\t\twith open(str(num),'wb') as f:\n\t\t\t\tf.write(data.content)\n\n\n\tdef getimg(self,links):\n\t\tfor link in links:\n\t\t\turl = link['href']\n\t\t\ttitle = link.get_text()\n\t\t\tos.chdir('/home/archean/图片/')\n\t\t\tif not os.path.exists(title):\n\t\t\t\tos.mkdir(title)\n\t\t\t\tprint('创建成功')\n\t\t\tos.chdir(title)\n\t\t\tresult = requests.get(url, headers = self.headers)\n\t\t\tsoup = BeautifulSoup(result.text, 'lxml')\n\t\t\timgnum = int(soup.select('a[href] span')[-2].text)\n\t\t\tpage = result.text\n\t\t\tnum = links.index(link)\n\t\t\tprint('正在下载第%s套图' % (num + 1))\n\t\t\tself.download(imgnum, url)\n\t\t\t\n\n\n\tdef start(self):\n\t\tpage = self.initpage()\n\t\tlinks = self.geturl(page)\n\t\tself.getimg(links)\n\n\nspider = learning()\nspider.start()\n","sub_path":"learning.py","file_name":"learning.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"541740839","text":"#!/usr/bin/env python\nimport numpy as np\nimport Tools as misctools\nimport matplotlib.pyplot as plt\n\n\nclass PointBrowser:\n \"\"\"\n Controls user interaction with the Mapper plot\n \"\"\"\n def __init__(self):\n self.dragging = None\n self.line = None\n self.points = None\n self.linelats = None\n self.linelons = None\n self.drawinglines = None\n self.arrow = None\n\n def addobjs(self,mapobj,figobj):\n\n '''Add a map object so that we can plot on it'''\n\n self.mapobj = mapobj\n self.figobj = figobj\n\n def updatelinecoords(self):\n\n '''reset the box coordinates to none'''\n\n self.linelats = None\n self.linelons = None\n self.points = None\n self.line = None\n self.drawinglines = None\n\n def motion(self,event):\n\n '''define what happens when the user moves the mouse over the canvas'''\n\n lon = event.xdata\n lat = event.ydata\n\n if self.drawinglines:\n\n if self.dragging:\n\n print('Dragging!')\n #print(lon,lat\n\n if self.line:\n self.line[0].remove()\n if self.points:\n self.points[0].remove()\n if self.arrow:\n self.arrow.remove()\n\n self.linelats = [self.startlat,lat]\n self.linelons = [self.startlon,lon]\n xevent,yevent = self.mapobj(self.linelons,self.linelats)\n\n self.arrow = plt.arrow(xevent[0],yevent[0],xevent[1]-xevent[0],yevent[1]-yevent[0],fc=\"k\", ec=\"k\", linewidth = 2, head_width=3, head_length=3)\n self.line = 
self.mapobj.plot(xevent,yevent,'r-',linewidth=2,alpha=0.9)\n                self.points = self.mapobj.plot(xevent,yevent,'ko')\n                self.figobj.canvas.draw()\n\n    def releasepick(self,event):\n\n        '''define what happens when the user releases the cursor'''\n\n        lon = event.xdata\n        lat = event.ydata\n\n        if self.dragging:\n\n            self.dragging = None\n\n\n    def returnboxcoords(self):\n\n        '''return the drawn line coordinates to the user'''\n\n        if self.linelats:\n\n            return self.linelats,self.linelons\n\n\n    def onpick(self, event):\n\n        '''define what happens when the user presses the cursor'''\n\n        if self.drawinglines:\n\n            if event.button == 3:\n                print('Create profile!')\n                s = input('Create a profile? [Y/N]: ').strip().upper()\n                if s == 'Y':\n                    print('making profile!!')\n                    #print(self.linelats)\n                    #print(self.linelons)\n                    midlon = self.linelons[0]\n                    midlat = self.linelats[0]\n                    #Determine azimuth from the start point to the selected location\n                    azimuth = misctools.coords_for_profile(self.linelons[0],self.linelats[0],self.linelons[1],self.linelats[1])\n                    print('Getting ready to run Ritsema codes with midlon/midlat = %g/%g and azimuth of %g' %(midlon,midlat,azimuth))\n                    misctools.Ritsema_180_sections(midlon,midlat,azimuth)\n                else:\n                    print('No profile selected. Continue')\n                self.drawinglines = None\n            if event.button == 1:\n                print('Draw desired profile now. Click mouse 3 when done with your profile')\n                self.dragging = True\n                lon = event.xdata\n                lat = event.ydata\n                self.startlon = lon\n                self.startlat = lat\n\n        elif event.button == 3:\n\n            print('proceeding to draw lines!')\n            self.drawinglines = True\n\n        elif event.button == 1:\n\n            print('No drawing lines!')\n            print('Use mouse 3 to start or stop drawing a profile')\n            print('Use mouse 1 to ')\n\n        else:\n\n            print('Not a recognized command!')\n","sub_path":"slabpy-master/Browser.py","file_name":"Browser.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"189859166","text":"import logging\r\nfrom logging.handlers import QueueHandler\r\nimport queue\r\nimport Pyro4\r\nfrom .nameserver import ports, lookup_local\r\n\r\n\r\ndef init_mission_logger(filepath):\r\n    logger = logging.getLogger('mission')\r\n    formatter = logging.Formatter(fmt='%(asctime)s | %(levelname)s | %(message)s', datefmt='%m/%d/%Y %H:%M:%S')\r\n\r\n    q = queue.Queue()\r\n    q_handler = QueueHandler(q)\r\n    q_handler.setFormatter(formatter)\r\n    logger.addHandler(q_handler)\r\n\r\n    f_handler = logging.FileHandler(filepath)\r\n    f_handler.setFormatter(formatter)\r\n    logger.addHandler(f_handler)\r\n\r\n    logger.setLevel(logging.INFO)\r\n\r\n\r\ndef init_robot_logger(filepath):\r\n    logger = logging.getLogger('robot')\r\n    formatter = logging.Formatter(fmt='%(asctime)s | %(levelname)s | %(message)s', datefmt='%m/%d/%Y %H:%M:%S')\r\n\r\n    f_handler = logging.FileHandler(filepath)\r\n    f_handler.setFormatter(formatter)\r\n    logger.addHandler(f_handler)\r\n\r\n    logger.setLevel(logging.DEBUG)\r\n\r\n\r\ndef init_mission_queue():\r\n    q = queue.Queue()\r\n    Pyro4.config.REQUIRE_EXPOSE = False\r\n\r\n    # Create a daemon.\r\n    port = ports['mission_logger']\r\n    daemon = Pyro4.Daemon('localhost', port)\r\n\r\n    # Register all objects.\r\n    daemon.register(q, 'logger')\r\n\r\n    # Start event loop.\r\n    daemon.requestLoop()\r\n\r\n\r\ndef watch_mission_logger(publish_function, prefix):\r\n    uri = lookup_local('mission_logger', 'logger')\r\n    log_queue = Pyro4.Proxy(uri)  # renamed so it doesn't shadow the stdlib queue module\r\n    while True:\r\n        m = log_queue.get()\r\n        topic = '{}.log'.format(prefix)\r\n        # publish every record; a return here would exit after the first message\r\n        publish_function(topic, 
*m)\r\n","sub_path":"src/apex/hera/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"19514629","text":"import redis\nimport datetime\nimport time\nimport threading\n\nfrom redis.exceptions import LockError\n\n\n\"\"\"\n    redis lock acts as a lightweight distributed lock implemented on top of redis\n\"\"\"\n\n\ndef rds_lock_test():\n    rds = redis.StrictRedis(port=6378)\n    \"\"\"\n    timeout: the timeout set on the lock; once it elapses the lock is released automatically,\n    so the lock may disappear while we still think we hold it\n    \"\"\"\n    try:\n        # Cannot release a lock that's no longer owned, because of the timeout we set\n        with rds.lock('my-lock-key', timeout=1, blocking_timeout=5) as lock:\n            print(threading.get_ident(), \"acquired the lock\")\n            time.sleep(5)\n            pass\n        print(\"releasing the lock\")\n\n    except LockError as e:\n        print(e)\n\n\nif __name__ == '__main__':\n    task1 = threading.Thread(target=rds_lock_test)\n    task2 = threading.Thread(target=rds_lock_test)\n\n    task1.start()\n    task2.start()\n\n    task1.join()\n    task2.join()\n","sub_path":"pythons/rediss/04redis_lock.py","file_name":"04redis_lock.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"647688420","text":"import datetime\nimport pytest\nfrom django.utils import timezone\nfrom django.test import TestCase\nfrom django.urls import reverse\nfrom utils.models import *\nfrom core.models import *\n\n\nclass TesteEspacoFisico(TestCase):\n\n\n    def test_controle_espaco_fisico(self):\n        evento = Evento(nome=\"Teste\")\n        atividade = AtividadePadrao(nome=\"Poker\")\n        espaco = EspacoFisico(nome = 'nome', tipoEspacoFisico = 'padrao', capacidade = 9999, evento = evento, atividade = atividade)\n        self.assertFalse(espaco.tipoEspacoFisico == '')\n\n    def test_impedir_que_atividades_ocorram_no_mesmo_espaco_fisico_com_tempos_sobrepostos(self):\n        evento = Evento(nome=\"Teste\")\n        horario_definido= Horario(data = '2017-09-15', hora_inicio = '12:30:00', hora_fim = '22:30:00')\n        atividade = AtividadePadrao(nome=\"Poker\", horario=horario_definido)\n        atividade_secundaria = AtividadePadrao(nome=\"Xadrez\", horario=horario_definido)\n        espaco = EspacoFisico(nome='nome', tipoEspacoFisico='padrao', capacidade=9999, evento=evento,\n                              atividade=atividade)\n        self.assertFalse(espaco.tipoEspacoFisico == '')\n        self.assertFalse(atividade.horario == atividade_secundaria.horario)\n\n    def test_permitir_usuario_usar_um_espaco_fisico_dentro_de_um_espaco_fisico(self):\n        evento = Evento(nome=\"Teste\")\n        atividade = AtividadePadrao(nome=\"Poker\")\n        espaco = EspacoFisico(nome='nome', tipoEspacoFisico='padrao', capacidade=9999, evento=evento,\n                              atividade=atividade)\n        espaco_interno = EspacoFisico(nome='Interno', tipoEspacoFisico='padrao', capacidade=99, evento=evento,\n                                      atividade=atividade)\n        self.assertTrue(espaco_interno in espaco)\n        \"\"\"\n        E.g.: The user creates a physical space IFPI. Then creates a physical space\n        Prédio B and indicates that it is in IFPI. Then creates a Laboratório B3-18 and\n        indicates that it is inside the physical space Prédio B.\n        \"\"\"\n","sub_path":"tests/core/test_espacoFisico.py","file_name":"test_espacoFisico.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"341949794","text":"import pygame, sys\nfrom pygame import *\n\npygame.init()\n\nxPos =400\nyPos = 400\n\nWIDTH = 800\nHEIGHT = 600\nBLACK = (0,0,0)\nWHITE = (255,255,255)\nRED = (255,0,0)\nGREEN = (0,255,0)\nBLUE = (0,0,255)\n\ncorsairMeme = pygame.image.load('mousePad.jpg')\nwindow = pygame.display.set_mode((WIDTH, HEIGHT),0, 32)\nwindow.blit(corsairMeme, (0,0))\n\n\nfont = pygame.font.SysFont(None,40)\ntext = font.render(\"Corsair suddenly surrounded by darkness.\",False,WHITE)\nmemeTalk = text.get_rect()\n\n\n\ndef greyScreen():\n    pxarray = pygame.PixelArray(window)\n    for y in xrange(HEIGHT):\n        for x in xrange(WIDTH):\n            RED = window.get_at((x,y)).r\n            GREEN = window.get_at((x,y)).g\n            BLUE = window.get_at((x,y)).b\n            grey = (RED + GREEN + BLUE)/3\n\n            pxarray[x,y] = (grey,grey,grey)\n\n    del pxarray\n\nwhile True:\n    for event in pygame.event.get():\n        if event.type == QUIT:\n            pygame.quit()\n            sys.exit()\n\n    pressed = pygame.key.get_pressed()\n    if pressed[K_ESCAPE]:\n        pygame.quit()\n        sys.exit()\n\n\n    memeTalk.centerx = 400\n    memeTalk.centery = 560\n\n\n    pygame.draw.rect(window, 0, memeTalk, 10)\n    window.blit(text,memeTalk)\n    greyScreen()\n    pygame.display.flip()\n    pygame.display.update()","sub_path":"Image.py","file_name":"Image.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"51190187","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# Generated file, DO NOT EDIT\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass ContributionProviderDetails(Model):\n    \"\"\"ContributionProviderDetails.\n\n    :param display_name: Friendly name for the provider.\n    :type display_name: str\n    :param name: Unique identifier for this provider. 
The provider name can be used to cache the contribution data and refer back to it when looking for changes\n :type name: str\n :param properties: Properties associated with the provider\n :type properties: dict\n :param version: Version of contributions assoicated with this contribution provider.\n :type version: str\n \"\"\"\n\n _attribute_map = {\n 'display_name': {'key': 'displayName', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n 'properties': {'key': 'properties', 'type': '{str}'},\n 'version': {'key': 'version', 'type': 'str'}\n }\n\n def __init__(self, display_name=None, name=None, properties=None, version=None):\n super(ContributionProviderDetails, self).__init__()\n self.display_name = display_name\n self.name = name\n self.properties = properties\n self.version = version\n","sub_path":"vsts/vsts/contributions/v4_0/models/contribution_provider_details.py","file_name":"contribution_provider_details.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"140304574","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .forms import UserRegisterForm, UserProfileForm, ProfileUpdateForm, UserUpdateForm\nfrom delCampo.models import Profile\n\n\ndef register(request):\n if request.method == 'POST':\n form = UserRegisterForm(request.POST)\n profile_form = UserProfileForm(request.POST)\n if form.is_valid() and profile_form.is_valid():\n #default save\n user = form.save()\n #adding profile form content to user\n request.user.profile = user.profile\n request.user.profile.department = profile_form.cleaned_data['department']\n request.user.profile.phone = profile_form.cleaned_data['phone']\n request.user.profile.save()\n messages.success(request, f'Successfully Created Account!')\n return redirect(\"/login\")\n else:\n form = UserRegisterForm()\n profile_form = UserProfileForm()\n return render(request, 'register.html', {'form': form, 'profile_form': profile_form})\n\n\n@login_required\ndef profile(request):\n if request.method == 'POST':\n user_update_form = UserUpdateForm(request.POST, instance=request.user)\n profile_update_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)\n if user_update_form.is_valid() and profile_update_form.is_valid():\n user = user_update_form.save()\n request.user.profile = user.profile\n request.user.profile.imagePic = profile_update_form.cleaned_data['imagePic']\n request.user.profile.department = profile_update_form.cleaned_data['department']\n request.user.profile.phone = profile_update_form.cleaned_data['phone']\n request.user.profile.save()\n\n profile_update_form.save()\n messages.success(request, f'Your account has been updated!')\n return redirect('/profile')\n\n else:\n user_update_form = UserUpdateForm(instance=request.user)\n profile_update_form = ProfileUpdateForm(instance=request.user.profile)\n\n context = {\n 'user_update_form': user_update_form,\n 'profile_update_form': profile_update_form\n }\n\n return render(request, 'profile.html', context)\n","sub_path":"src/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"341336137","text":"from PyPDF2 import PdfFileWriter, PdfFileReader\r\nimport io\r\nfrom reportlab.pdfgen import canvas\r\nfrom reportlab.lib.pagesizes import 
letter\r\n\r\n# read your existing PDF\r\nexisting_pdf = PdfFileReader(open(\"datacamp_1.pdf\", \"rb\"))\r\npages = existing_pdf.getNumPages()\r\n\r\ntemp_pdf_writer = PdfFileWriter()\r\n\r\nfor i in range(pages):\r\n packet = io.BytesIO()\r\n # create a new PDF with Reportlab\r\n can = canvas.Canvas(packet, pagesize=letter)\r\n #can.drawString(300, 10, f\"{i+1}\")\r\n can.setFont('Times-Roman', 35)\r\n can.drawString(960, 30, f\"{i+1}\")\r\n can.save()\r\n\r\n \r\n #move to the beginning of the StringIO buffer\r\n packet.seek(0)\r\n new_pdf = PdfFileReader(packet)\r\n first_page = new_pdf.getPage(0)\r\n temp_pdf_writer.addPage(first_page)\r\n\r\n del can\r\n \r\n\r\noutput = PdfFileWriter()\r\n# add the \"watermark\" (which is the new pdf) on the existing page\r\nfor i in range(pages):\r\n page = existing_pdf.getPage(i)\r\n page.mergePage(temp_pdf_writer.getPage(i))\r\n output.addPage(page)\r\n \r\n# finally, write \"output\" to a real file\r\noutputStream = open(f\"destination{pages}.pdf\", \"wb\")\r\noutput.write(outputStream)\r\noutputStream.close()\r\n","sub_path":"pdf_watermark.py","file_name":"pdf_watermark.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"348599044","text":"from snr import *\n\n\n# Stirling of the first kind\ndef stirling_first_kind():\n first_st = Block.blank()\n first_st[0][0] = 1\n\n for n in range(std_l):\n first_st[n + 1] = first_st[n] * Seq([n, 1])\n\n fs_f = first_st.f(a=1)\n\n print(fs_f)\n print(fs_f.i())\n\n\n# Stirling of the second kind\ndef stirling_second_kind():\n second_st = Block.blank()\n second_st[0][0] = 1\n\n for n in range(1, std_l):\n for k in range(n + 1):\n if k == 0:\n second_st[n][k] = 0\n else:\n second_st[n][k] = k * second_st[n - 1][k] + second_st[n - 1][k - 1]\n\n ss_f = second_st.f()\n\n print(ss_f)\n print(ss_f.i())\n\n\nstirling_first_kind()\nprint()\nstirling_second_kind()","sub_path":"archive/misc/triangles/stirling_triangles.py","file_name":"stirling_triangles.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"608258171","text":"def sockMerchant(n, ar):\n unique = []\n mydict = {}\n for i in ar:\n\n if i not in unique:\n unique = unique + [i]\n\n mydict[i] = 1\n else:\n mydict[i] += 1\n results = list(mydict.values())\n result = [i // 2 for i in results]\n result = sum(result)\n return result\n\n\nprint(sockMerchant(10, [1, 2, 1, 2, 1, 1, 3, 2, 3, 4]))\n","sub_path":"sockMerchant.py","file_name":"sockMerchant.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"34621737","text":"import nltk\nfrom nltk.corpus import stopwords\nimport re\nimport os\nimport numpy as np\n\nclass TextAnalyzer:\n\n\tword_list = []\n\tsent_list = []\n\n\tdef __init__(self, word_list_dir=None, pos_sent_list_dir=None, neg_sent_list_dir=None):\n\t\t\"\"\"\n\t\t\tInitializing Text Analyzer Object, User may insert the word list dictionary and sentiment \n\t\t\tdictionary for further usage\n\n\t\t\tArgs:\n\t\t\t\tword_list_dir(str, optional): Directory of the word dictionary list (file in txt format)\n\t\t\t\tpos_sent_dir(str, optional): Directory of positive sentiment dictionary list (file in txt format)\n\t\t\t\tneg_sent_dir(str, optional): Directory of negative sentiment dictionary list (file in txt format)\n\t\t\"\"\"\n\t\tself.word_list_dir = 
word_list_dir\n\t\tself.pos_sent_list_dir = pos_sent_list_dir\n\t\tself.neg_sent_list_dir = neg_sent_list_dir\n\n\t\t# use instance-level lists so instances don't share the class-level defaults\n\t\tself.word_list = []\n\t\tself.pos_sent_list = []\n\t\tself.neg_sent_list = []\n\n\t\t# Loading Word List Dictionary if given\n\t\tif (word_list_dir is not None):\n\t\t\tif os.path.isfile(word_list_dir):\n\t\t\t\tread_word = open(word_list_dir, 'r')\n\t\t\t\treadline_word = read_word.readlines()\n\t\t\t\tif readline_word:\n\t\t\t\t\tfor word in readline_word:\n\t\t\t\t\t\tself.word_list.append(word)\n\t\t\t\telse:\n\t\t\t\t\traise ValueError(\"Word List Dictionary is empty\")\n\t\t\telse:\n\t\t\t\traise FileNotFoundError(\"Word List Dictionary is not found\")\n\n\t\t# Loading Positive Sentiment List Dictionary if given\n\t\tif (pos_sent_list_dir is not None):\n\t\t\tif os.path.isfile(pos_sent_list_dir):\n\t\t\t\tread_sen = open(pos_sent_list_dir, 'r')\n\t\t\t\treadline_sen = read_sen.readlines()\n\t\t\t\tif readline_sen:\n\t\t\t\t\tfor sen in readline_sen:\n\t\t\t\t\t\tself.pos_sent_list.append(sen)\n\t\t\t\telse:\n\t\t\t\t\traise ValueError(\"Positive Sentiment List Dictionary is empty\")\n\t\t\telse:\n\t\t\t\traise FileNotFoundError(\"Positive Sentiment List Dictionary is not found\")\n\n\t\t# Loading Negative Sentiment List Dictionary if given\n\t\tif (neg_sent_list_dir is not None):\n\t\t\tif os.path.isfile(neg_sent_list_dir):\n\t\t\t\tread_sen = open(neg_sent_list_dir, 'r')\n\t\t\t\treadline_sen = read_sen.readlines()\n\t\t\t\tif readline_sen:\n\t\t\t\t\tfor sen in readline_sen:\n\t\t\t\t\t\tself.neg_sent_list.append(sen)\n\t\t\t\telse:\n\t\t\t\t\traise ValueError(\"Negative Sentiment List Dictionary is empty\")\n\t\t\telse:\n\t\t\t\traise FileNotFoundError(\"Negative Sentiment List Dictionary is not found\")\n\n\tdef get_text_sentiment(self, text):\n\t\t\"\"\"\n\t\t\tRetrieve sentiment of a text\n\n\t\t\tArgs:\n\t\t\t\ttext(string): Text to be analyzed\n\n\t\t\tReturns:\n\t\t\t\tpositive_sentiment(float): Positive sentiment rating\n\t\t\t\tnegative_sentiment(float): Negative sentiment rating\n\t\t\"\"\"\n\t\tprint(\"Under Construction\")\n\t\tpositive_sentiment = 0.0\n\t\tnegative_sentiment = 0.0\n\n\t\t# normalize text\n\t\t# convert to freqdist\n\t\t# Get sentiment with naive bayes\n\n\t\treturn positive_sentiment, negative_sentiment\n\n\tdef retrieve_non_dictionary_words(self, text, word_list_dir=None):\n\t\tif (word_list_dir is not None):\n\t\t\tself.word_list_dir = word_list_dir\n\n\t\t\t# Loading Word List Dictionary if given\n\t\t\tif os.path.isfile(word_list_dir):\n\t\t\t\tread_word = open(word_list_dir, 'r')\n\t\t\t\treadline_word = read_word.readlines()\n\t\t\t\tif readline_word:\n\t\t\t\t\tfor word in readline_word:\n\t\t\t\t\t\tself.word_list.append(word)\n\t\t\t\telse:\n\t\t\t\t\traise ValueError(\"Word List Dictionary is empty\")\n\t\t\telse:\n\t\t\t\traise FileNotFoundError(\"Word List Dictionary is not found\")\n\n\t\tif (not self.word_list):\n\t\t\treturn \"No dictionary found\"\n\n\t\tfiltered_text = [word for word in text if word not in self.word_list]\n\t\treturn filtered_text","sub_path":"classifier/ft/lib/TextAnalyzer.py","file_name":"TextAnalyzer.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"122438464","text":"# NMEC: 8913\n# NOME: Tomás Candeias\n\ndef printStocks(stocks):\n\tdef valor(stock):\n\t\t# percent change from opening to closing price\n\t\treturn float((stock[3]*100)/stock[2]) - 100\n\t\n\tfor stock in 
stocks:\n\t\tprint(\"{:<10}{:<10}{:>10.2f}{:>10.2f}{:>10}{:>10.1f}%\".format(stock[0], stock[1], stock[2], stock[3], stock[4], valor(stock)))\n\n\n\n# Each tuple = (company, city, open, close, volume)\nstocks = [\n\t('INTC', 'London', 34.249, 34.451, 1792860),\n\t('TSLA', 'London', 221.33, 229.63, 398520),\n\t('EA', 'Paris', 72.63, 68.98, 1189510),\n\t('INTC', 'Tokyo', 33.22001, 34.28999, 4509110),\n\t('TSLA', 'Paris', 217.35, 217.75, 252500),\n\t('ATML', 'Frankfurt', 8.23, 8.36, 810440),\n]\n\nprint(\"\\na)\")\nprintStocks(stocks)\n\nprint(\"\\nb)\")\nstocks2 = sorted(stocks, key=lambda stock: (stock[0], -1*stock[4]))\nprintStocks(stocks2)\n\nprint(\"\\nc)\")\nstocks3 = [stock for stock in stocks if stock[1] == \"Paris\"]\nprintStocks(stocks3)\n\nprint(\"\\nd)\")\nstocks4 = []\nwith open(\"stocks.txt\", \"r\") as f:\n\tfor line in f:\n\t\tline = line.strip().split(\"\\t\")\n\t\tstocks4.append((line[0], line[1], float(line[2]), float(line[3]), int(line[4])))\n\t\n\nprintStocks(stocks4)\n# The following conditions must be true\nassert type(stocks4)==list\nassert type(stocks4[0])==tuple\nassert len(stocks4[0])==5\nassert type(stocks4[0][2])==float\nassert type(stocks4[0][4])==int\nprint(\"END\")\n\n","sub_path":"Programming Fundamentals/Práticas/extray/stocks.py","file_name":"stocks.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"626747459","text":"import cv2\nimport matplotlib.pyplot as plt\n\n# Gaussian pyramid\nimg = cv2.imread('timg.jpg', 0) # read directly as a grayscale image\nimg1 = cv2.pyrDown(img)\nimg2 = cv2.pyrUp(img)\n\n# Laplacian pyramid, often used for image compression\n\ntemp_img1 = cv2.pyrDown(img1)\ntemp = cv2.pyrUp(temp_img1)\nimg3 = img1 - temp  # note: uint8 subtraction wraps around; cv2.subtract would clamp instead\n\nplt.subplot(221)\nplt.imshow(img, 'gray')\nplt.subplot(222)\nplt.imshow(img1, 'gray')\nplt.subplot(223)\nplt.imshow(img2, 'gray')\nplt.subplot(224)\nplt.imshow(img3, 'gray')\n\n# image size becomes 1/4 of the original\nprint(img.shape)\nprint(img1.shape)\n\n# image becomes 2x the original\nprint(img2.shape)\n\nplt.show()\n","sub_path":"imagepyr.py","file_name":"imagepyr.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"186546542","text":"from collections import OrderedDict\nfrom django.contrib import admin\nfrom django.conf.urls import patterns\nfrom django.shortcuts import render_to_response\nfrom django.core.urlresolvers import reverse\nimport django.db.models.options\nimport django.db.models.fields.related\nimport ptree.constants\nfrom django.http import HttpResponse, HttpResponseBadRequest\nfrom django.contrib.staticfiles.templatetags.staticfiles import static as static_template_tag\nimport ptree.session.models\nfrom ptree.common import currency, app_name_format\nfrom data_exports.admin import ExportAdmin\nfrom django.utils.importlib import import_module\nimport os\nfrom inspect_model import InspectModel\nimport inspect\nimport time\nfrom textwrap import TextWrapper\n\nLINE_BREAK = '\\r\\n'\nMODEL_NAMES = [\"Participant\", \"Match\", \"Treatment\", \"Experiment\", \"SessionParticipant\", \"Session\"]\n\ndef new_tab_link(url, label):\n    return '<a href=\"{}\" target=\"_blank\">{}</a>'.format(url, label)\n\ndef start_urls_for_experiment(experiment, request):\n    if request.GET.get(ptree.constants.experimenter_access_code) != experiment.experimenter_access_code:\n        return HttpResponseBadRequest('{} parameter missing or incorrect'.format(ptree.constants.experimenter_access_code))\n    participants = experiment.participants()\n    urls = 
[request.build_absolute_uri(participant.start_url()) for participant in participants]\n return HttpResponse('\\n'.join(urls), content_type=\"text/plain\")\n\ndef remove_duplicates(lst):\n return list(OrderedDict.fromkeys(lst))\n\ndef get_readonly_fields(Model, fields_specific_to_this_subclass=None):\n\n fields_for_this_model_type = {\n 'Participant':\n ['name',\n 'link',\n 'bonus_display',\n 'progress'],\n 'Match':\n [],\n 'Treatment':\n ['link'],\n 'Experiment':\n ['experimenter_input_link'],\n 'Session':\n ['time_started',\n 'experiment_names',\n 'start_urls_link',\n 'magdeburg_start_urls_link',\n 'global_start_link',\n 'mturk_snippet_link',\n 'payments_ready',\n 'payments_link',\n 'base_pay_display',],\n 'SessionParticipant':\n ['bonus_display',\n 'start_link',\n 'progress',\n 'current_experiment',\n 'progress_in_current_experiment'],\n }[Model.__name__]\n\n return remove_duplicates(fields_for_this_model_type + (fields_specific_to_this_subclass or []))\n\ndef get_list_display(Model, readonly_fields, first_fields=None):\n\n first_fields = {\n 'Participant':\n ['name',\n 'session',\n 'experiment',\n 'treatment',\n 'match',\n 'visited',\n 'progress'],\n 'Match':\n ['id',\n 'session',\n 'experiment',\n 'treatment'],\n 'Treatment':\n ['name',\n 'session',\n 'experiment'],\n 'Experiment':\n ['name',\n 'session'],\n 'SessionParticipant':\n ['name',\n 'start_link',\n 'session',\n 'visited',\n 'progress',\n 'current_experiment',\n 'progress_in_current_experiment'],\n 'Session':\n ['name',\n 'hidden'],\n }[Model.__name__]\n\n last_fields = {\n 'Participant': [],\n 'Match': [],\n 'Treatment': [],\n 'Experiment': [],\n 'SessionParticipant': [\n 'start_link',\n 'exclude_from_data_analysis',\n 'experimenter_comment',\n ],\n 'Session': [\n\n 'comment',\n ],\n }[Model.__name__]\n\n\n fields_to_exclude = {\n 'Participant':\n {'id',\n 'code',\n 'index_in_sequence_of_views',\n 'me_in_previous_experiment_content_type',\n 'me_in_previous_experiment_object_id',\n 'me_in_next_experiment_content_type',\n 'me_in_next_experiment_object_id',\n 'session_participant',\n },\n 'Match':\n set(),\n 'Treatment':\n {'id',\n 'label'},\n 'Experiment':\n {'id',\n 'label',\n 'session_access_code',\n 'next_experiment_content_type',\n 'next_experiment_object_id',\n 'next_experiment',\n 'previous_experiment_content_type',\n 'previous_experiment_object_id',\n 'previous_experiment',\n 'experimenter_access_code',\n },\n 'SessionParticipant':\n {'id',\n 'index_in_sequence_of_experiments',\n 'label',\n 'me_in_first_experiment_content_type',\n 'me_in_first_experiment_object_id',\n 'code',\n 'ip_address',\n 'mturk_assignment_id',\n 'mturk_worker_id'},\n 'Session':\n {'id',\n 'label',\n 'first_experiment_content_type',\n 'first_experiment_object_id',\n 'first_experiment',\n 'git_hash',\n 'experimenter_access_code',\n 'preassign_matches',\n 'is_for_mturk',\n 'base_pay',\n # don't hide the code, since it's useful as a checksum (e.g. 
if you're on the payments page)\n            }\n    }[Model.__name__]\n\n\n\n\n\n    all_field_names = [field.name for field in Model._meta.fields if field.name not in fields_to_exclude]\n    list_display = first_fields + readonly_fields + all_field_names\n    list_display = [f for f in list_display if f not in last_fields] + last_fields\n    return _add_links_for_foreign_keys(Model, remove_duplicates(list_display))\n\nclass FieldLinkToForeignKey:\n    def __init__(self, list_display_field):\n        self.list_display_field = list_display_field\n\n    @property\n    def admin_order_field(self):\n        return self.list_display_field\n\n    @property\n    def __name__(self):\n        return self.list_display_field\n\n    def __repr__(self):\n        return self.list_display_field\n\n    def __str__(self):\n        return self.list_display_field\n\n    def __call__(self, instance):\n        object = getattr(instance, self.list_display_field)\n        if object is None:\n            return \"(None)\"\n        else:\n            url = reverse('admin:%s_%s_change' %(object._meta.app_label, object._meta.module_name), \n                args=[object.id])\n            return '<a href=\"%s\">%s</a>' % (url, object.__unicode__())\n\n    @property\n    def allow_tags(self):\n        return True\n\ndef is_fk_link_to_parent_class(field):\n    return isinstance(field, FieldLinkToForeignKey) and field.__name__ in {'match', 'treatment', 'experiment', 'session'}\n\ndef _add_links_for_foreign_keys(model, list_display_fields):\n    \n    result = []\n    for list_display_field in list_display_fields:\n        if hasattr(model, list_display_field):\n            try:\n                if isinstance(model._meta.get_field(list_display_field), \n                    django.db.models.fields.related.ForeignKey):\n                    result.append(FieldLinkToForeignKey(list_display_field))\n                    continue\n            except django.db.models.options.FieldDoesNotExist:\n                pass\n        result.append(list_display_field)\n    return result\n\nclass NonHiddenSessionListFilter(admin.SimpleListFilter):\n    title = \"session\"\n\n    parameter_name = \"session\"\n\n    def lookups(self, request, model_admin):\n        \"\"\"\n        Returns a list of tuples. The first element in each\n        tuple is the coded value for the option that will\n        appear in the URL query. 
The second element is the\n human-readable name for the option that will appear\n in the right sidebar.\n \"\"\"\n return [(session.id, session.id) for session\n in ptree.session.models.Session.objects.filter(hidden=False)]\n\n def queryset(self, request, queryset):\n \"\"\"\n Returns the filtered queryset based on the value\n provided in the query string and retrievable via\n `self.value()`.\n \"\"\"\n if self.value() is not None:\n return queryset.filter(session__pk=self.value())\n else:\n return queryset\n\nclass PTreeBaseModelAdmin(admin.ModelAdmin):\n \"\"\"Allow leaving fields blank in the admin\"\"\"\n def get_form(self, request, obj=None, **kwargs):\n form = super(PTreeBaseModelAdmin, self).get_form(request, obj, **kwargs)\n for key in form.base_fields.keys():\n try:\n model_field, _, _, _ = self.model._meta.get_field_by_name(key)\n if model_field.null:\n form.base_fields[key].required = False\n except django.db.models.options.FieldDoesNotExist:\n pass\n return form\n\nclass ParticipantAdmin(PTreeBaseModelAdmin):\n change_list_template = \"admin/ptree_change_list.html\"\n\n def link(self, instance):\n url = instance.start_url()\n return new_tab_link(url, 'Link')\n\n link.short_description = \"Start link\"\n link.allow_tags = True\n list_filter = [NonHiddenSessionListFilter, 'experiment', 'treatment', 'match']\n list_per_page = 30\n\n def queryset(self, request):\n qs = super(ParticipantAdmin, self).queryset(request)\n return qs.filter(session__hidden=False)\n\nclass MatchAdmin(PTreeBaseModelAdmin):\n change_list_template = \"admin/ptree_change_list.html\"\n\n list_filter = [NonHiddenSessionListFilter, 'experiment', 'treatment']\n list_per_page = 30\n\n def queryset(self, request):\n qs = super(MatchAdmin, self).queryset(request)\n return qs.filter(session__hidden=False)\n\n\nclass TreatmentAdmin(PTreeBaseModelAdmin):\n change_list_template = \"admin/ptree_change_list.html\"\n\n def link(self, instance):\n if instance.experiment.session.preassign_matches:\n return 'Not available (--preassign-matches was set)'\n url = instance.start_url()\n return new_tab_link(url, 'Link')\n\n link.short_description = \"Demo link\"\n link.allow_tags = True\n list_filter = [NonHiddenSessionListFilter, 'experiment']\n\n def queryset(self, request):\n qs = super(TreatmentAdmin, self).queryset(request)\n return qs.filter(session__hidden=False)\n\n\nclass ExperimentAdmin(PTreeBaseModelAdmin):\n change_list_template = \"admin/ptree_change_list.html\"\n\n def experimenter_input_link(self, instance):\n url = instance.experimenter_input_url()\n return new_tab_link(url, 'Link')\n\n def queryset(self, request):\n qs = super(ExperimentAdmin, self).queryset(request)\n return qs.filter(session__hidden=False)\n\n experimenter_input_link.short_description = 'Link for experimenter input during gameplay'\n experimenter_input_link.allow_tags = True\n list_filter = [NonHiddenSessionListFilter]\n\nclass SessionParticipantAdmin(PTreeBaseModelAdmin):\n change_list_template = \"admin/ptree_change_list.html\"\n\n list_filter = [NonHiddenSessionListFilter]\n\n readonly_fields = get_readonly_fields(ptree.session.models.SessionParticipant, [])\n list_display = get_list_display(ptree.session.models.SessionParticipant, readonly_fields)\n list_editable = ['exclude_from_data_analysis']\n\n\n def start_link(self, instance):\n url = instance.start_url()\n return new_tab_link(url, 'Link')\n start_link.allow_tags = True\n\n def queryset(self, request):\n qs = super(SessionParticipantAdmin, self).queryset(request)\n return 
qs.filter(session__hidden=False)\n\nclass SessionAdmin(PTreeBaseModelAdmin):\n change_list_template = \"admin/ptree_change_list.html\"\n\n def get_urls(self):\n urls = super(SessionAdmin, self).get_urls()\n my_urls = patterns('',\n (r'^(?P\\d+)/payments/$', self.admin_site.admin_view(self.payments)),\n (r'^(?P\\d+)/mturk_snippet/$', self.admin_site.admin_view(self.mturk_snippet)),\n (r'^(?P\\d+)/start_urls/$', self.start_urls),\n (r'^(?P\\d+)/magdeburg_start_urls/$', self.magdeburg_start_urls),\n\n )\n return my_urls + urls\n\n def start_urls_list(self, request, session):\n participants = session.participants()\n return [request.build_absolute_uri(participant.start_url()) for participant in participants]\n\n def start_urls(self, request, pk):\n session = self.model.objects.get(pk=pk)\n\n if request.GET.get(ptree.constants.experimenter_access_code) != session.experimenter_access_code:\n return HttpResponseBadRequest('{} parameter missing or incorrect'.format(ptree.constants.experimenter_access_code))\n urls = self.start_urls_list(request, session)\n return HttpResponse('\\n'.join(urls), content_type=\"text/plain\")\n\n def start_urls_link(self, instance):\n if not instance.first_experiment:\n return 'No experiments in sequence'\n return new_tab_link('{}/start_urls/?{}={}'.format(instance.pk,\n ptree.constants.experimenter_access_code,\n instance.experimenter_access_code), 'Link')\n\n start_urls_link.short_description = 'Start URLs'\n start_urls_link.allow_tags = True\n\n def magdeburg_start_urls(self, request, pk):\n session = self.model.objects.get(pk=pk)\n urls = self.start_urls_list(request, session)\n import_file_lines = []\n for i, url in enumerate(urls):\n start = url.index('?')\n params = url[start+1:]\n import_file_lines.append('maxlab-{} | 1 | /name {}&{}&{}={}'.format(str(i+1).zfill(2),\n i+1,\n params,\n ptree.constants.session_participant_label,\n i+1))\n response = HttpResponse('\\n'.join(import_file_lines), content_type=\"text/plain\")\n response['Content-Disposition'] = 'attachment; filename=\"{}\"'.format('ptree-{}.ini'.format(time.time()))\n return response\n\n def magdeburg_start_urls_link(self, instance):\n if not instance.first_experiment:\n return 'No experiments in sequence'\n return new_tab_link('{}/magdeburg_start_urls/?{}={}'.format(instance.pk,\n ptree.constants.experimenter_access_code,\n instance.experimenter_access_code), 'Link')\n\n magdeburg_start_urls_link.short_description = 'Magdeburg Start URLs'\n magdeburg_start_urls_link.allow_tags = True\n\n\n def mturk_snippet(self, request, pk):\n session = self.model.objects.get(pk=pk)\n experiment = session.first_experiment\n hit_page_js_url = request.build_absolute_uri(static_template_tag('ptree/js/mturk_hit_page.js'))\n experiment_url = request.build_absolute_uri(experiment.start_url())\n return render_to_response('admin/MTurkSnippet.html',\n {'hit_page_js_url': hit_page_js_url,\n 'experiment_url': experiment_url,},\n content_type='text/plain')\n\n def mturk_snippet_link(self, instance):\n if not instance.first_experiment:\n return 'No experiments in sequence'\n if instance.is_for_mturk:\n return new_tab_link('{}/mturk_snippet/'.format(instance.pk), 'Link')\n else:\n return 'N/A (is_for_mturk = False)'\n\n mturk_snippet_link.allow_tags = True\n mturk_snippet_link.short_description = \"HTML snippet for MTurk HIT page\"\n\n def global_start_link(self, instance):\n if instance.is_for_mturk:\n return 'N/A (is_for_mturk = True)'\n if not instance.first_experiment:\n return 'No experiments in sequence'\n else:\n url = 
instance.start_url()\n return new_tab_link(url, 'Link')\n\n global_start_link.allow_tags = True\n global_start_link.short_description = \"Global start URL (only if you can't use regular start URLs)\"\n\n def payments(self, request, pk):\n session = self.model.objects.get(pk=pk)\n participants = session.participants()\n total_payments = sum(participant.total_pay() or 0 for participant in session.participants())\n\n # order by label if they are numbers. or should we always order by label?\n\n\n return render_to_response('admin/Payments.html',\n {'participants': participants,\n 'total_payments': currency(total_payments),\n 'session_code': session.code,\n 'session_name': session,\n 'base_pay': currency(session.base_pay),\n })\n\n def payments_link(self, instance):\n if instance.payments_ready():\n link_text = 'Ready'\n else:\n link_text = 'Incomplete'\n return new_tab_link('{}/payments/'.format(instance.pk), link_text)\n\n payments_link.short_description = \"Payments page\"\n payments_link.allow_tags = True\n\n readonly_fields = get_readonly_fields(ptree.session.models.Session, [])\n list_display = get_list_display(ptree.session.models.Session, readonly_fields)\n\n list_editable = ['hidden']\n\ndef get_data_export_fields(app_label):\n admin_module = import_module('{}.admin'.format(app_label))\n export_info = {}\n for model_name in MODEL_NAMES:\n if model_name == 'Session':\n list_display = SessionAdmin.list_display\n elif model_name == 'SessionParticipant':\n list_display = SessionParticipantAdmin.list_display\n else:\n list_display = getattr(admin_module, '{}Admin'.format(model_name)).list_display\n # remove since these are redundant\n export_info[model_name] = [field for field in list_display if not is_fk_link_to_parent_class(field)]\n return export_info\n\ndef build_doc_file(app_label):\n export_fields = get_data_export_fields(app_label)\n app_models_module = import_module('{}.models'.format(app_label))\n\n first_line = '{}: Field descriptions'.format(app_name_format(app_label))\n\n docs = ['{}\\n{}\\n\\n'.format(first_line,\n '*'*len(first_line))]\n\n doc_string_wrapper = TextWrapper(\n width=100,\n initial_indent='\\t'*2,\n subsequent_indent='\\t'*2\n )\n\n for model_name in MODEL_NAMES:\n if model_name == 'SessionParticipant':\n Model = ptree.session.models.SessionParticipant\n elif model_name == 'Session':\n Model = ptree.session.models.Session\n else:\n Model = getattr(app_models_module, model_name)\n im = InspectModel(Model)\n member_types = {\n 'fields': im.fields,\n 'methods': im.methods,\n # TODO: add properties, attributes, and others\n }\n\n docs.append('\\n' + model_name)\n\n for member_name in export_fields[model_name]:\n\n if member_name in member_types['methods']:\n # check if it's a method\n member = getattr(Model, member_name)\n doc = inspect.getdoc(member)\n elif member_name in member_types['fields']:\n try:\n member = Model._meta.get_field_by_name(member_name)[0]\n doc = member.documentation\n except AttributeError:\n # maybe the field isn't from ptree.db\n doc = ''\n else:\n doc = '[not a field or method]'\n doc = doc or ''\n docs.append('\\n\\t' + member_name)\n if doc:\n docs.append('\\n'.join(doc_string_wrapper.wrap(doc)))\n\n output = '\\n'.join(docs)\n return output.replace('\\n', LINE_BREAK).replace('\\t', ' ')\n\ndef doc_file_name(app_label):\n return '{} -- field descriptions ({}).txt'.format(app_name_format(app_label),\n )\n\nclass PTreeExportAdmin(ExportAdmin):\n\n # In Django 1.7, I can set list_display_links to None and then put 'name' first\n list_display = 
['get_export_link', 'docs_link', 'name']\n ordering = ['slug']\n list_filter = []\n\n def get_urls(self):\n urls = super(PTreeExportAdmin, self).get_urls()\n my_urls = patterns('',\n (r'^(?P\\d+)/docs/$', self.admin_site.admin_view(self.docs)),\n )\n return my_urls + urls\n\n def docs(self, request, pk):\n export = self.model.objects.get(pk=pk)\n app_label = export.model.app_label\n response = HttpResponse(build_doc_file(app_label))\n response['Content-Disposition'] = 'attachment; filename=\"{}\"'.format(doc_file_name(app_label))\n return response\n\n def docs_link(self, instance):\n return new_tab_link('{}/docs/'.format(instance.pk), label=doc_file_name(instance.model.app_label))\n\n docs_link.allow_tags = True\n docs_link.short_description = 'Field descriptions'\n","sub_path":"ptree/adminlib.py","file_name":"adminlib.py","file_ext":"py","file_size_in_byte":21313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"469012947","text":"from datetime import datetime\nfrom airflow.models import Variable\n\n\ndef read_load_dag_vars(var_prefix=\"\", **kwargs):\n \"\"\"Read Airflow variables for Load DAG\"\"\"\n load_max_active_runs = read_var(\"load_max_active_runs\", var_prefix, False, **kwargs)\n load_max_active_runs = (\n int(load_max_active_runs) if load_max_active_runs is not None else None\n )\n\n vars = {\n \"dataflow_template_path\": read_var(\n \"dataflow_template_path\", var_prefix, True, **kwargs\n ),\n \"dataflow_environment\": read_var(\n \"dataflow_environment\", var_prefix, True, True, **kwargs\n ),\n \"dataset_project_id\": read_var(\n \"dataset_project_id\", var_prefix, True, **kwargs\n ),\n \"dataset_name\": read_var(\"dataset_name\", var_prefix, True, **kwargs),\n \"input_file\": read_var(\"input_file\", var_prefix, True, **kwargs),\n \"output_table\": read_var(\"output_table\", var_prefix, True, **kwargs),\n \"notification_emails\": read_var(\"notification_emails\", None, False, **kwargs),\n \"load_schedule_interval\": read_var(\n \"load_schedule_interval\", var_prefix, False, **kwargs\n ),\n \"load_max_active_runs\": load_max_active_runs,\n }\n\n load_start_date = read_var(\"load_start_date\", var_prefix, False, **kwargs)\n if load_start_date is not None:\n vars[\"load_start_date\"] = datetime.strptime(load_start_date, \"%Y-%m-%d\")\n\n load_end_date = read_var(\"load_end_date\", var_prefix, False, **kwargs)\n if load_end_date is not None:\n vars[\"load_end_date\"] = datetime.strptime(load_end_date, \"%Y-%m-%d\")\n\n return vars\n\n\ndef read_var(\n var_name, var_prefix=None, required=False, deserialize_json=False, **kwargs\n):\n \"\"\"Read Airflow variable\"\"\"\n full_var_name = f\"{var_prefix}{var_name}\" if var_prefix is not None else var_name\n var = Variable.get(full_var_name, default_var=\"\", deserialize_json=deserialize_json)\n\n if var == \"\":\n var = None\n\n if var_prefix and var is None:\n var = read_var(var_name, None, required, deserialize_json, **kwargs)\n\n if var is None:\n var = kwargs.get(var_name)\n\n if required and var is None:\n raise ValueError(f\"{full_var_name} variable is required\")\n\n return var\n","sub_path":"airflow/dags/wordetl_airflow/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"259425355","text":"from keyGen import keyGen\nfrom readKey import readKey\nimport rsa\nfrom aesCrypt import aesCrypt\n\nclass reEncryption:\n def 
__init__(self,bobPubKey):\n self.bobPubkey = bobPubKey\n self.getKey()\n \n def getKey(self):\n keyGen.genKey()\n self.keys = readKey.readPubPriKey(\"\")\n self.mkey = readKey.readMKey()\n \n def genNewKey(self,bobPubKey,lastOrder):\n keys = \"\"\n pubprikey = readKey.readPubPriKey(\"\")\n selfPriKey = pubprikey[1]\n with open (\"fileEncrypt/aes/number.seq\", \"r\") as f:\n num = int(f.read())\n\n if(num == 0):\n return 0,None\n fileIDs = \"\"\n fileID = 0\n number = 0\n with open(\"fileEncrypt/aes/docKeys.key\", \"r\") as f:\n for i in range(0,num):\n encodedDocKey = f.readline() # readLine for\n if(i>> import logging\n>>> logging.getLogger()\n\nis identical to\n\n>>> from virt.lib.core import log_handler\n>>> log_handler.get_logger()\n\nSo you do not need to use log_handler all the time. It's up to you really.\n\nThough one reason why you need to use log_hander instead of logging is if you\nlogging.getLogger() only, no one will set up the logging handler therefore no\noutput will be displayed.\n\nRunning a test script using STAT should not be a problem since STAT calls\nlogging.basicConfig() and set up the logging handler.\n\nThe default logging level is DEBUG, The way to change the logging level is\n\n>>> log_handler.setlevel(20)\n\nAnd this module has some more useful functions that can parse head/tail of a\ntext file without storing entire file content on the memory.\n\n\"\"\"\n\nimport logging\nimport platform\nimport os\n\nfrom virt.lib.core import exception\n\n\n__version__ = \"1.0.0\" # PEP 8. Also check PEP 386 for the format.\nDEFAULT_FORMAT = '|%(levelname)-8s|%(message)s'\nBLOCK_SIZE = 1024 # Block size to parse data at once\n\n\ndef get_logger(name=None, loglevel=None):\n \"\"\"\n Return a logger as logging.getLogger(), but additionally run basicConfig\n only if Python interpreter is not IronPython - namely not ran by STAT.\n\n Args:\n name (str): Name of the logger\n loglevel (int): log level for the logger.\n\n \"\"\"\n if '.NET' not in platform.python_compiler():\n logging.basicConfig(format=DEFAULT_FORMAT, level=logging.INFO)\n\n ret_logger = logging.getLogger(name=name)\n if loglevel:\n ret_logger.setLevel(loglevel)\n\n return ret_logger\n\n\ndef setlevel(level):\n logging.root.setLevel(level)\n\n\ndef get_head(filename, line_num):\n \"\"\"\n Return a list of lines as many as line_num without storing the entire file\n content to the memory - this is for when handling a large size file.\n\n Args:\n filename (str): filename with an absolute path\n line_num (int): A number of lines that should be returned. If the\n entire file content has a less number of lines, return whatever\n available.\n\n Return:\n list: A list of lines\n \"\"\"\n\n if not os.path.exists(filename) or not os.path.isfile(filename):\n raise exception.LogHandlerException(\n 'file %s does not exist or not a file' % filename\n )\n\n output = ''\n\n with open(filename, 'r') as fileobj:\n fileobj.seek(0, os.SEEK_END)\n filesize = fileobj.tell()\n\n fileobj.seek(0)\n\n while fileobj.tell() < filesize:\n output += fileobj.read(BLOCK_SIZE)\n\n if output.count('\\n') >= line_num:\n return output.splitlines()[:line_num]\n\n # File content is shorter than 1024 and no more lines are available.\n # Return as it is.\n if len(output) < BLOCK_SIZE:\n return output.splitlines()\n\n # Cannot find given lines. Return whatever parsed\n return output.splitlines()\n\n\ndef get_tail(filename, line_num):\n \"\"\"\n Return a list of lines as many as line num without storing the entire file\n ceontent to the memory. 
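For contrast with the block-reading `get_tail` being defined here: when holding `line_num` parsed lines in memory is acceptable, the standard library gives the same tail semantics in a few lines. A sketch, not part of the original module:

from collections import deque

def tail_lines(filename, line_num):
    # deque(maxlen=n) streams the file and keeps only the last n lines,
    # so memory stays bounded by n lines rather than by the file size.
    with open(filename, 'r') as fileobj:
        return [line.rstrip('\n') for line in deque(fileobj, maxlen=line_num)]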
This is for when handling a large file.\n\n    Args:\n        filename (str): filename with an absolute path\n        line_num (int): The number of lines that should be returned. If the\n            entire file content has fewer lines, return whatever is\n            available.\n\n    Return:\n        list: A list of lines\n    \"\"\"\n\n    if not os.path.exists(filename) or not os.path.isfile(filename):\n        raise exception.LogHandlerException(\n            'file %s does not exist or not a file' % filename\n        )\n\n    output = ''\n\n    with open(filename, 'r') as fileobj:\n        fileobj.seek(0, os.SEEK_END)\n        filesize = fileobj.tell()\n        block_multiply = 1\n\n        # File content is shorter than one block and no more lines are\n        # available. Return as it is.\n        if filesize < BLOCK_SIZE:\n            fileobj.seek(0)\n            output = fileobj.read()\n            return output.splitlines()[-line_num:]\n\n        while True:\n            offset = max(filesize - BLOCK_SIZE * block_multiply, 0)\n            fileobj.seek(offset)\n            # Re-read from the offset to EOF on each pass so the lines stay\n            # in file order; appending blocks read back-to-front would\n            # scramble them.\n            output = fileobj.read()\n\n            if output.count('\\n') >= line_num or offset == 0:\n                break\n\n            block_multiply += 1\n\n    # If fewer than line_num lines exist, this returns all of them.\n    return output.splitlines()[-line_num:]\n","sub_path":"lib/core/log_handler.py","file_name":"log_handler.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"251769388","text":"from setuptools import setup, find_packages\nimport os\n\n\ndef load_data_files():\n    data_files = []\n\n    for root, _, files in os.walk('data'):\n        data_files.extend(['{r}/{f}'.format(r=root, f=f) for f in files])\n\n    return [('data', data_files)]\n\nsetup(\n    name='tyr',\n    version='0.0.1',\n    author='Mihir Singh (@citruspi)',\n    author_email='mihir.singh@hudl.com',\n    packages=find_packages(),\n    test_suite='nose.collector',\n    zip_safe=False,\n    include_package_data=True,\n    platforms='any',\n    install_requires=[\n        'boto',\n        'boto3',\n        'pyChef',\n        'paramiko',\n        'click',\n        'PyYAML',\n        'requests',\n        'nose',\n        'cloudspecs'\n    ],\n    scripts=[\n        'scripts/replace-mongodb-servers',\n        'scripts/compact-mongodb-servers',\n        'scripts/build-mv-service'\n    ],\n    data_files=load_data_files()\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"602323649","text":"import socket\nimport sys\n\nPORT = 50000\nBUFSIZE = 4096\n\nclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# socket.AF_INET6 would select IPv6\n# socket.SOCK_STREAM selects TCP\n\n# Communicate with the server\nhost = input('Server to connect to: ')\ntry:\n    client.connect((host, PORT))\nexcept OSError:\n    print('Cannot connect')\n    sys.exit()\n# Send a message to the server\nmsg = input('write message : ')\nclient.sendall(msg.encode('utf-8'))\n\n# Receive the reply from the server\ndata = client.recv(BUFSIZE)\nprint('Message from the server:')\nprint(data.decode('utf-8'))\n\nclient.close()\n","sub_path":"client1.py","file_name":"client1.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"343794916","text":"# Copyright 2015 The TensorFlow Authors.
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nfrom collections import namedtuple\n\nimport tensorflow as tf\n\nimport settings\nfrom common.mnist import read_data_sets\nfrom solve import model\n\nModelConfig = namedtuple('ModelConfig', 'image_size labels')\nTrainingConfig = namedtuple('TrainingConfig', 'iterations batch_size')\n\n\ndef do_session(dataset, model_config, training_config, sess_fname):\n print('train set: %d test set: %d validation set: %d' %\n (len(dataset.train.images),\n len(dataset.test.images),\n len(dataset.validation.images)))\n\n # required\n sess = tf.InteractiveSession()\n\n # Create the model\n x = model.create_input_variable(model_config.image_size)\n y = model.create_model(x, model_config.image_size, model_config.labels)\n\n # placeholder for desired output\n y_ = tf.placeholder(tf.float32, [None, model_config.labels], name='desired_output')\n\n # Define loss and optimizer\n with tf.name_scope('cross_entropy'):\n diff = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)), reduction_indices=[1])\n with tf.name_scope('total'):\n cross_entropy = tf.reduce_mean(diff)\n tf.scalar_summary('cross entropy', cross_entropy)\n with tf.name_scope('train'):\n train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\n # Test trained model\n # tf.argmax(y): prediction\n # tf.argmax(y_): label\n with tf.name_scope('accuracy'):\n with tf.name_scope('correct_prediction'):\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n with tf.name_scope('accuracy'):\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n tf.scalar_summary('accuracy', accuracy)\n\n merged = tf.merge_all_summaries()\n train_writer = tf.train.SummaryWriter('logs/train', sess.graph)\n test_writer = tf.train.SummaryWriter('logs/test')\n tf.initialize_all_variables().run()\n\n # Has to come after all the variable definitions\n saver = tf.train.Saver()\n\n # Train\n tf.initialize_all_variables().run()\n for iteration in range(training_config.iterations):\n batch_data, batch_labels = dataset.train.next_batch(training_config.batch_size)\n\n summary, _ = sess.run([merged, train_step], feed_dict={x: batch_data, y_: batch_labels})\n train_writer.add_summary(summary, iteration)\n if iteration % 100 == 0:\n # perform accuracy check\n validation_accuracy = sess.run(accuracy, feed_dict={x: dataset.validation.images,\n y_: dataset.validation.labels})\n # check for overfitting\n training_accuracy = sess.run(accuracy, feed_dict={x: dataset.train.images,\n y_: dataset.train.labels})\n print('epoch %d iteration %d done; ta %f va %f' %\n (dataset.train._epochs_completed, iteration, training_accuracy, validation_accuracy))\n\n saver.save(sess, sess_fname)\n\n summary, acc = sess.run([merged, accuracy], feed_dict={x: dataset.test.images, y_: dataset.test.labels})\n 
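In the training script above, `tf.clip_by_value(y, 1e-10, 1.0)` guards the hand-rolled cross-entropy against `log(0)`. When pre-softmax logits are available, the fused op from the same TF 1.x-era API is the numerically stable route (a sketch with placeholder tensors, not a drop-in patch, since the script's `y` is already softmaxed):

import tensorflow as tf  # TF 1.x-era API, matching the script above

labels = tf.placeholder(tf.float32, [None, 10])
logits = tf.placeholder(tf.float32, [None, 10])  # pre-softmax scores

# The fused op applies softmax internally in a numerically stable way,
# so no clip_by_value guard against log(0) is needed.
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))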
test_writer.add_summary(summary)\n test_writer.flush()\n return acc\n\n\ndef do_session_with_mnist(iterations, data_dir, sess_fname):\n data_sets = read_data_sets(data_dir, one_hot=True)\n model_cfg = ModelConfig(image_size=28*28, labels=10)\n training_cfg = TrainingConfig(iterations=iterations, batch_size=100)\n return do_session(data_sets, model_cfg, training_cfg, sess_fname)\n\n\ndef do_session_with_boolean(iterations, data_dir, sess_fname):\n labels = 2\n data_sets = read_data_sets(data_dir, one_hot=True, classes=labels)\n model_cfg = ModelConfig(image_size=settings.WINDOW_WIDTH*settings.WINDOW_HEIGHT, labels=labels)\n training_cfg = TrainingConfig(iterations=iterations, batch_size=100)\n with tf.name_scope('boolean'):\n return do_session(data_sets, model_cfg, training_cfg, sess_fname)\n\n\ndef parse_arguments(args=None):\n parser = argparse.ArgumentParser()\n parser.add_argument('--mnist', action='store_true')\n parser.add_argument('--boolean', action='store_true')\n parser.add_argument('--data_dir', type=str, required=True)\n parser.add_argument('--session_file', type=str, required=True)\n parser.add_argument('--iterations', type=int)\n return parser.parse_args(args)\n\n\nif __name__ == '__main__':\n args = parse_arguments()\n it = args.iterations\n if not args.mnist and not args.boolean:\n print('flag not specified')\n else:\n if args.mnist:\n accuracy = do_session_with_mnist(it, args.data_dir, args.session_file)\n elif args.boolean:\n accuracy = do_session_with_boolean(it, args.data_dir, args.session_file)\n print('test accuracy: %s' % accuracy)\n","sub_path":"solve/train_boolean.py","file_name":"train_boolean.py","file_ext":"py","file_size_in_byte":5566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"387261718","text":"__author__ = 'Timothy Portfolio'\n\n'''To-Do List'''\n\n# turn this into a .exe with GUI\n# improve readability\n# multi-threading possible with workerpool module?\n# better integration with retired leadership/permanent veterans\n\n'''Imports and Globals'''\n\nimport datetime\nfrom selenium import webdriver\nimport time\n\nparse_veterans = False # toggle on/off depending on needs\nfull_demotion_process = True # toggle on/off depending on needs\ntoday = datetime.date.today()\nignore_list = [\"Finn :D\"]\n\n'''Auxiliary Functions'''\n\nclass Parser(object):\n\n def index_num(self, rank):\n return {\n \"Veteran\": 4, \"Member\": 5, \"Recruit\": 6,\n }[rank]\n\n def invalid_tokens(self, date, vet_array):\n if (\"Today\" in date or \"Yesterday\" in date or\n \"Online\" in date or \"ago\" in date): #i.e. 
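The `abbr_to_num` helper just below maps English month abbreviations through a hand-written table; `datetime.strptime` with the `%b` directive does the same lookup (under the default C/English locale), which makes a handy cross-check:

import datetime

def abbr_to_num(abbr):
    # %b parses locale-dependent month abbreviations; under the default
    # C locale these are exactly 'Jan' through 'Dec'.
    return datetime.datetime.strptime(abbr, '%b').month

assert abbr_to_num('Sep') == 9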
\"A minute ago\", \"5 minutes ago\"\n return True\n elif vet_array and (\"2014\" in date or \"2015\" in date):\n return True\n else:\n return False\n\n def abbr_to_num(self, abbr):\n return {\n 'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,\n 'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12,\n }[abbr]\n\nclass UserProfile(object):\n\n def __init__(self, name, url):\n self.name = name\n self.url = url\n\n def get_name(self):\n return self.name\n\n def get_url(self):\n return self.url\n\nclass Browser(object):\n\n browser = webdriver.Firefox()\n parser = Parser()\n\n def __init__(self):\n self.browser.get('http://hellsgamers.com/')\n\n def goto(self, link):\n self.browser.get(link)\n\n def login(self):\n self.browser.find_element_by_id(\"sign_in\").click()\n time.sleep(3)\n username = self.browser.find_element_by_id(\"ips_username\")\n password = self.browser.find_element_by_id(\"ips_password\")\n username.send_keys(\"\") #redacted\n password.send_keys(\"\") #redacted\n self.browser.find_element_by_class_name(\"input_submit\").click()\n\n def find_tables(self, rank):\n tables = self.browser.find_elements_by_tag_name(\"table\")\n index = self.parser.index_num(rank)\n user_tr1 = tables[index].find_elements_by_class_name(\"row1\")\n user_tr2 = tables[index].find_elements_by_class_name(\"row2\")\n return user_tr1 + user_tr2\n\n def find_spans(self, link):\n self.browser.get(link)\n return self.browser.find_elements_by_css_selector(\".desc.lighter\")\n\n def add_note(self):\n self.browser.find_element_by_partial_link_text(\"Demote\").click()\n self.browser.find_element_by_partial_link_text(\"Member Note\").click()\n self.browser.find_element_by_link_text(\"Add Note\").click()\n time.sleep(3)\n note_field = self.browser.find_element_by_class_name(\"input_text\")\n note_field.send_keys(\"Test 5: Revenge of the Test\")\n self.browser.find_element_by_class_name(\"input_submit\").click()\n time.sleep(2)\n\n\nclass Scraper(object):\n\n def __init__(self, browser, rank):\n self.browser = browser\n self.rank = rank\n\n def collect(self):\n parser = Parser()\n self.browser.goto('http://hellsgamers.com/roster.html')\n tables = self.browser.find_tables(self.rank)\n users = []\n i = 0\n while i < len(tables):\n # print(el.get_attribute('innerHTML'))\n tdelements = tables[i].find_elements_by_tag_name('td')\n name = tdelements[1].text.replace('\\n', '').replace('\\t', '')\n date = tdelements[5].text.replace('\\n', '').replace('\\t', '').split('-',1)[0] #remove hour\n\n if self.rank is \"Veteran\":\n if not(parser.invalid_tokens(date, True)):\n link = tables[i].find_element_by_tag_name('a').get_attribute('href')\n print(name + \" - \" + date + \" - \" + link)\n user = UserProfile(name, link)\n users.append(user)\n else:\n if not(parser.invalid_tokens(date, False)):\n inactivity_length = 90 if self.rank == \"Member\" else 60\n day = int(date.split(' ',2)[0])\n month = date.split(' ',2)[1]\n year = int(date.split(' ',2)[2])\n\n diff = today - datetime.date(year, parser.abbr_to_num(month), day)\n if diff.days >= inactivity_length:\n link = tables[i].find_element_by_tag_name('a').get_attribute('href')\n print(name + \" - \" + str(diff.days) + \" days\" + \" - \" + link)\n user = UserProfile(name, link)\n users.append(user)\n i+=1\n return users\n\nclass Demoter(object):\n\n def __init__(self, browser, profiles, rank):\n self.browser = browser\n self.profiles = profiles\n self.rank = rank\n\n def verify(self):\n demoted_names = []\n parser = Parser()\n\n i = 0\n while i < 
len(self.profiles):\n time.sleep(2)\n span_list = self.browser.find_spans(self.profiles[i].get_url())\n last_active = \"\"\n for el in span_list:\n if \"Last Active\" in el.text:\n last_active = el.text.replace(\"Last Active \", \"\")\n break\n if self.rank is not \"Veteran\":\n if not(parser.invalid_tokens(last_active, False)) and self.profiles[i].get_name() not in ignore_list:\n inactivity_length = 90 if self.rank == \"Member\" else 60\n try:\n day = int(last_active.split(' ',3)[1])\n month = last_active.split(' ',3)[0]\n year = int(last_active.split(' ',3)[2])\n diff = today - datetime.date(year, parser.abbr_to_num(month), day)\n\n if diff.days >= inactivity_length:\n if self.rank == \"Member\":\n ignore_list.append(self.profiles[i].get_name()) #avoid double demotion\n\n print(self.profiles[i].get_name() + \" - \" + str(diff.days) + \" days\" + \" - \" + self.profiles[i].get_url())\n demoted_names.append(self.profiles[i].get_name())\n if full_demotion_process:\n self.browser.add_note()\n except:\n print(self.profiles[i].get_name() + \" - (private) - \" + \" - \" + self.profiles[i].get_url())\n demoted_names.append(self.profiles[i].get_name())\n if full_demotion_process:\n self.browser.add_note()\n\n else:\n if not(parser.invalid_tokens(last_active, True)) and self.profiles[i].get_name() not in ignore_list:\n print(self.profiles[i].get_name() + \" - \" + last_active + \" - \" + self.profiles[i].get_url())\n demoted_names.append(self.profiles[i].get_name())\n if full_demotion_process:\n self.browser.add_note()\n i+=1\n demoted_names = sorted(demoted_names, key=lambda s: s.lower())\n for el in demoted_names:\n print(el)\n\n'''Main Code'''\n\ndef main():\n\n browser = Browser()\n browser.login()\n scraper = Scraper(browser, \"Member\")\n users = scraper.collect()\n for el in users:\n print(el.get_name() + ' - ' + el.get_url())\n demoter = Demoter(browser, users, \"Member\")\n demoter.verify()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"demotions.py","file_name":"demotions.py","file_ext":"py","file_size_in_byte":7618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"36599264","text":"def get_prop(props=[], name=[]):\n for p in props:\n try:\n if p[\"name\"] == name:\n return p\n except KeyError:\n return None\n\n# returns a middleware. 
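Two notes on the demotion script above: `self.rank is "Veteran"` tests object identity rather than equality (`==` is what is meant; identity only works by accident of string interning), and the fixed `time.sleep(8)` pauses can be replaced with explicit waits. A sketch of the latter, using Selenium's standard wait API:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

browser = webdriver.Firefox()
browser.get('http://hellsgamers.com/')

# Proceed as soon as the element is clickable (up to a 10 s timeout)
# instead of always pausing for a fixed 8 seconds.
wait = WebDriverWait(browser, 10)
wait.until(EC.element_to_be_clickable((By.ID, "sign_in"))).click()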
a middleware is a function that takes and ast element and returns an other ast element\n# in the case of this middleware, we inspect the type of el and call it with a context.\n# with this middlware a component can now return a function taking a context as argument and returning an el.\ndef context_middleware(context):\n\n def middleware(el):\n if callable(el):\n return el(context)\n else:\n return el\n return middleware\n\ndef default_middleware(el, _props = [], _children=[]):\n return el\n\nimport math\nimport random\n\n\ndef diff_asts(old, new):\n patches = []\n new_tree = {}\n old_name = old[\"name\"]\n old_props = old[\"props\"]\n new_name = new[\"name\"]\n new_props = new[\"props\"]\n\n # if elements not same name\n # request to patch innerHTML with chp-id of the old one\n\n if old_name != new_name:\n patches.append({\n \"type\": \"replace-element\",\n \"chp-id\": get_prop(old_props, \"chp-id\")[\"value\"],\n \"html\": render_element(new),\n })\n new_tree = new\n # id = get_prop('chp-id', new_tree[\"props\"])\n # id[\"value\"] = get_prop('chp-id', new[\"props\"])[\"value\"]\n\n # else\n # go through props\n # excluding children\n # if props differ\n # request patching with new props (but keep old chp-id)\n\n else:\n i = 0\n props_differ = False\n if len(old_props) != len(new_props):\n props_differ = True\n\n if not props_differ:\n while i < len(new_props):\n c1 = old_props[i][\"name\"] != new_props[i][\"name\"]\n c2 = old_props[i][\"value\"] != new_props[i][\"value\"]\n\n if c1 or c2:\n if new_props[i][\"name\"] != \"children\":\n if new_props[i][\"name\"] != \"chp-id\":\n props_differ = True\n i += 1\n\n\n if props_differ:\n id = get_prop(new_props, \"chp-id\")\n id[\"value\"] = get_prop(old_props, \"chp-id\")[\"value\"]\n patches.append({\n \"type\": \"props\",\n \"chp-id\": get_prop(old_props, \"chp-id\")[\"value\"],\n \"props\" : new_props\n })\n new_tree = new\n new_tree[\"props\"] = new_props\n else:\n new_tree = old\n\n\n nc = get_prop(new_props, \"children\")\n oc = get_prop(old_props, \"children\")\n new_children = nc[\"value\"] if nc else []\n old_children = oc[\"value\"] if oc else []\n\n if len(new_children) != len(old_children):\n html = \"\"\n if type(new_children) is str:\n html = new_children\n else:\n new_tree_children = get_prop(new_tree[\"props\"], 'children') # ref to new_tree's props\n new_tree_children[\"value\"] = []\n for c in new_children:\n html += render_element(c)\n # new_tree\n new_tree_children[\"value\"].append(c)\n\n patches.append({\n \"type\": \"innerHTML\",\n \"chp-id\": get_prop(old_props, \"chp-id\")[\"value\"],\n \"html\": html,\n })\n else:\n new_tree_children = get_prop(new_tree[\"props\"], 'children') # ref to new_tree's props\n new_tree_children[\"value\"] = get_prop(new[\"props\"], 'children')[\"value\"] # ref to new_tree's props\n if type(new_children) is str:\n new_tree_children[\"value\"] = new_children\n else:\n new_tree_children[\"value\"] = []\n i = 0\n while i < len(new_children):\n child_diff = diff_asts(old_children[i], new_children[i])\n ps = child_diff[0]\n for p in ps:\n patches.append(p)\n i += 1\n\n new_tree_children[\"value\"].append(child_diff[1])\n\n # go through children\n # if new one missing\n # request deletion of old node (with chp-id)\n\n # if old one missing\n # request creation of new node\n\n return [patches, new_tree]\n\n\ndef render_html(el, props, child):\n name = el[\"name\"]\n\n children = \"\"\n for c in child:\n children += c\n\n props_str = \"\"\n for p in props:\n if p[\"name\"] != \"children\":\n props_str 
+= (p[\"name\"] + \"=\\\"\" + p[\"value\"] + \"\\\"\")\n\n self_closing_tags = [\"input\", \"link\", \"img\"]\n if name in self_closing_tags:\n return f\"<{name} {props_str} />\"\n\n return f\"<{name} {props_str}>{children}\"\n\ndef render_js(el, props, child):\n name = el[\"name\"]\n props_str = \"\"\n\n children = \"\"\n for c in child:\n children += c\n\n before=get_prop(props, \"before\")[\"value\"]\n after=get_prop(props, \"after\")[\"value\"]\n\n return f\"{before}{children}{after}\"\n\ndef id_middleware(ast):\n props = ast[\"props\"]\n props.append({\n \"name\": \"chp-id\",\n \"value\": str(math.floor(random.random()*10000000))\n })\n return ast\n\n\ndef render_ast(ast, ast_middleware, render_middleware):\n ast = ast_middleware(ast)\n\n props = ast[\"props\"]\n\n children = False\n for p in props:\n if p[\"name\"] == \"children\":\n children = p[\"value\"]\n\n child = []\n if not children:\n child = \"\"\n elif type(children) is str:\n child = children\n else:\n for c in children:\n child.append(render_ast(c, ast_middleware, render_middleware))\n\n return render_middleware(ast, props, child)\n\ndef inject_ids(ast):\n return render_ast(ast, id_middleware, default_middleware)\n\ndef render_js_element(ast):\n return render_ast(ast, default_middleware, render_js)\n\ndef render_element(ast, middleware=default_middleware):\n return render_ast(ast, middleware, render_html)\n\ndef create_element(name, props, children):\n props.append({\n \"name\": \"children\",\n \"value\": children,\n })\n\n return {\n \"name\": name,\n \"props\": props,\n }\n\ndef create_prop(name, value):\n return {\n \"name\": name,\n \"value\": value,\n }\n\ndef get_prop(props=[], name=[]):\n for p in props:\n try:\n if p[\"name\"] == name:\n return p\n except KeyError:\n return None\n\ndef create_context(value):\n return [{\n \"name\": \"__context\",\n \"value\": value,\n }]\n\n\nce = create_element\ncp = create_prop\n","sub_path":"chp/pyreact.py","file_name":"pyreact.py","file_ext":"py","file_size_in_byte":6483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"200192313","text":"import os, os.path\r\nfrom os import scandir\r\nfrom PIL import Image, ImageDraw, ImageFont\r\nimport face_recognition\r\nimport numpy as np\r\nimport pickle\r\nfrom flaskapp.models import Person\r\n\r\npath = \"./flaskapp/static/images/dataset/\"\r\n\r\ndef scantree(path):\r\n \"\"\"Recursively yield DirEntry objects for given directory.\"\"\"\r\n for entry in scandir(path):\r\n if entry.is_dir(follow_symlinks=False):\r\n yield from scantree(entry.path)\r\n else:\r\n yield entry\r\n\r\n\r\n####################################################################\r\n# Save all face encodings to dataset_faces.dat #\r\n####################################################################\r\ndef make_new_face_encodings():\r\n # save face encodings in dataset_faces.dat using pickle\r\n all_face_encodings = {}\r\n\r\n for file in scantree(path):\r\n f_name = file.name\r\n f_person_id = f_name.split(\"_\")[0]\r\n print(f_name)\r\n print(f_person_id)\r\n image = face_recognition.load_image_file(file.path)\r\n \r\n face_encodings = face_recognition.face_encodings(image)\r\n if len(face_encodings) > 0:\r\n \tall_face_encodings[f_person_id] = face_encodings[0]\r\n\r\n # save encoding\r\n with open('./flaskapp/dataset_faces.dat', 'wb') as f:\r\n pickle.dump(all_face_encodings, f)\r\n\r\n\r\n####################################################################\r\n# Register faces in image 
#\r\n####################################################################\r\ndef hasSingleFace(image):\r\n\t# Load the jpg file into a numpy array\r\n\timage = face_recognition.load_image_file(image)\r\n\r\n\t# Find all the faces in the image using the default HOG-based model.\r\n\t# This method is fairly accurate, but not as accurate as the CNN model and not GPU accelerated.\r\n\t# See also: find_faces_in_picture_cnn.py\r\n\tface_locations = face_recognition.face_locations(image)\r\n\r\n\t# Find all the faces in the image using a pre-trained convolutional neural network.\r\n\t# This method is more accurate than the default HOG model, but it's slower\r\n\t# unless you have an nvidia GPU and dlib compiled with CUDA extensions. But if you do,\r\n\t# this will use GPU acceleration and perform well.\r\n\t# See also: find_faces_in_picture.py\r\n\t# face_locations = face_recognition.face_locations(image, number_of_times_to_upsample=0, model=\"cnn\")\r\n\r\n\tprint(\"I found {} face(s) in this photograph.\".format(len(face_locations)))\r\n\r\n\t# Return face image only if there is only one face in the image\r\n\tif len(face_locations) == 1:\r\n\t\t# Print the location of each face in this image\r\n\t\ttop, right, bottom, left = face_locations[0]\r\n\t\tprint(\"A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}\".format(top, left, bottom, right))\r\n\r\n\t\t# You can access the actual face itself like this:\r\n\t\tface_image = image[top:bottom, left:right]\r\n\t\tpil_image = Image.fromarray(face_image)\r\n\r\n\t\treturn pil_image\r\n\telse:\r\n\t\treturn None\r\n\r\n\r\n####################################################################\r\n# Detect faces in image #\r\n####################################################################\r\ndef detectFaces(image):\r\n\t# Load the jpg file into a numpy array\r\n\timage = face_recognition.load_image_file(image)\r\n\r\n\t# Find all the faces in the image using the default HOG-based model.\r\n\t# This method is fairly accurate, but not as accurate as the CNN model and not GPU accelerated.\r\n\t# See also: find_faces_in_picture_cnn.py\r\n\tface_locations = face_recognition.face_locations(image)\r\n\r\n\t# Find all the faces in the image using a pre-trained convolutional neural network.\r\n\t# This method is more accurate than the default HOG model, but it's slower\r\n\t# unless you have an nvidia GPU and dlib compiled with CUDA extensions. 
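`make_new_face_encodings` earlier in this record re-scans the whole dataset folder on every run; for registering a single new face, appending to the pickled dict is enough. A hypothetical helper along those lines (name and default path are illustrative):

import pickle
import face_recognition

def add_face_encoding(person_id, image_path,
                      dat_path='./flaskapp/dataset_faces.dat'):
    """Register one face without re-encoding the whole dataset."""
    with open(dat_path, 'rb') as f:
        encodings = pickle.load(f)

    image = face_recognition.load_image_file(image_path)
    found = face_recognition.face_encodings(image)
    if found:  # skip images where no face was detected
        encodings[person_id] = found[0]
        with open(dat_path, 'wb') as f:
            pickle.dump(encodings, f)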
But if you do,\r\n\t# this will use GPU acceleration and perform well.\r\n\t# See also: find_faces_in_picture.py\r\n\t# face_locations = face_recognition.face_locations(image, number_of_times_to_upsample=0, model=\"cnn\")\r\n\r\n\tprint(\"I found {} face(s) in this photograph.\".format(len(face_locations)))\r\n\r\n\t# Convert the image to a PIL-format image so that we can draw on top of it with the Pillow library\r\n\t# See http://pillow.readthedocs.io/ for more about PIL/Pillow\r\n\tpil_image = Image.fromarray(image)\r\n\t# Create a Pillow ImageDraw Draw instance to draw with\r\n\tdraw = ImageDraw.Draw(pil_image)\r\n\r\n\tfor face_location in face_locations:\r\n\t # Print the location of each face in this image\r\n\t top, right, bottom, left = face_location\r\n\t print(\"A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}\".format(top, left, bottom, right))\r\n\r\n\t # You can access the actual face itself like this:\r\n\t # face_image = image[top:bottom, left:right]\r\n\t # pil_image = Image.fromarray(face_image)\r\n\t \r\n\t # Draw a box around the face using the Pillow module\r\n\t draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255), width=3)\r\n\r\n\t# Remove the drawing library from memory as per the Pillow docs\r\n\tdel draw\r\n\r\n\t# Display the resulting image\r\n\t# pil_image.show()\r\n\treturn pil_image\r\n\r\n\r\n####################################################################\r\n# Recognize faces in image #\r\n####################################################################\r\ndef recognizeFaces(image):\r\n\t# Get the current working directory (cwd)\r\n\tcwd = os.getcwd()\r\n\t# Get all the files in that directory\r\n\tfiles = os.listdir(cwd)\r\n\tprint(\"Files in '%s': %s\" % (cwd, files))\r\n\t# Load face encodings\r\n\twith open('./flaskapp/dataset_faces.dat', 'rb') as f:\r\n\t\tall_known_face_encodings = pickle.load(f)\r\n\r\n\t# Grab the list of names and the list of encodings\r\n\tknown_face_names = list(all_known_face_encodings.keys())\r\n\tknown_face_encodings = np.array(list(all_known_face_encodings.values()))\r\n\r\n\t# Load an image with an unknown face\r\n\tunknown_image = face_recognition.load_image_file(image)\r\n\r\n\t# Find all the faces and face encodings in the unknown image\r\n\tface_locations = face_recognition.face_locations(unknown_image)\r\n\tface_encodings = face_recognition.face_encodings(unknown_image, face_locations)\r\n\r\n\t# Convert the image to a PIL-format image so that we can draw on top of it with the Pillow library\r\n\t# See http://pillow.readthedocs.io/ for more about PIL/Pillow\r\n\tpil_image = Image.fromarray(unknown_image)\r\n\t# Create a Pillow ImageDraw Draw instance to draw with\r\n\tdraw = ImageDraw.Draw(pil_image)\r\n\r\n\t# Loop through each face found in the unknown image\r\n\tfor (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):\r\n\t\t# See if the face is a match for the known face(s)\r\n\t\tmatches = face_recognition.compare_faces(known_face_encodings, face_encoding)\r\n\r\n\t\tname = \"Unknown\"\r\n\r\n\t\t# # If a match was found in known_face_encodings, just use the first one.\r\n\t\t# if True in matches:\r\n\t\t# first_match_index = matches.index(True)\r\n\t\t# id = known_face_names[first_match_index]\r\n\t\t# name = Person.query.get(id).name\r\n\r\n\t\t# Or instead, use the known face with the smallest distance to the new face\r\n\t\tface_distances = face_recognition.face_distance(known_face_encodings, face_encoding)\r\n\t\tbest_match_index = 
np.argmin(face_distances)\r\n\t\tif matches[best_match_index]:\r\n\t\t\t# add 1 beacuse sqlalchemy starts id from 1\r\n\t\t id = int(known_face_names[best_match_index])\r\n\t\t name = Person.query.get(id).name\r\n\r\n\t\t# Draw a box around the face using the Pillow module\r\n\t\tdraw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))\r\n\r\n\t\tfontsize = 1 # starting font size\r\n\r\n\t\t# portion of image width you want text width to be\r\n\t\timg_fraction = 0.98\r\n\r\n\t\tfont = ImageFont.truetype(\"./app/static/fonts/GoogleSans-Regular.ttf\", fontsize)\r\n\t\twhile font.getsize(name)[0] < img_fraction * (right - left):\r\n\t\t # iterate until the text size is just larger than the criteria\r\n\t\t fontsize += 1\r\n\t\t font = ImageFont.truetype(\"./flaskapp/static/fonts/GoogleSans-Regular.ttf\", fontsize)\r\n\r\n\t\t# optionally de-increment to be sure it is less than criteria\r\n\t\tfontsize -= 1\r\n\t\tfont = ImageFont.truetype(\"./flaskapp/static/fonts/GoogleSans-Regular.ttf\", fontsize)\r\n\t\t# Draw a label with a name below the face\r\n\t\ttext_width, text_height = draw.textsize(name)\r\n\t\tdraw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255), width=3)\r\n\t\tdraw.text((left + 6, bottom - text_height - 5), name, fill=(255, 255, 255, 255), font=font)\r\n\r\n\t# Remove the drawing library from memory as per the Pillow docs\r\n\tdel draw\r\n\r\n\t# Display the resulting image\r\n\t# pil_image.show()\r\n\treturn pil_image","sub_path":"flaskapp/faces.py","file_name":"faces.py","file_ext":"py","file_size_in_byte":8651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"304660127","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n\n def add_node(self,x):\n if self.next is None:\n self.next=ListNode(x)\n else:\n self.next.add_node(x)\n \n ListNode.add_node=add_node\n\n added_val=l1.val+l2.val\n cache=int((added_val)/10)\n l1_add_l2=ListNode((added_val)%10) \n while l1.next is not None or l2.next is not None:\n if l1.next is not None:\n l1=l1.next\n else:\n l1=ListNode(0)\n if l2.next is not None:\n l2=l2.next\n else:\n l2=ListNode(0)\n added_val=l1.val+l2.val+cache\n cache=int((added_val)/10)\n l1_add_l2.add_node((added_val)%10) \n if cache>0 and l1.next is None and l2.next is None:\n l1_add_l2.add_node(cache) \n return l1_add_l2\n","sub_path":"Medium/2_Add Two Numbers.py","file_name":"2_Add Two Numbers.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"26753087","text":"# -*- coding: UTF-8 -*-\n\"\"\"\napp1.py: First Python-Flask webapp\n\"\"\"\nfrom flask import Flask, render_template, jsonify, request \nfrom cupshelpers.cupshelpers import Device\napp = Flask(__name__) # Construct an instance of Flask class\n@app.route('/')\ndef index():\n return render_template('index.html')\n@app.route('/api/picontroller', methods=['POST', 'GET']) \ndef get_picontroller():\n device =request.args.get('device')\n \n if device != \"\":\n return jsonify({'device': device}) \n \n\nif __name__ == '__main__': # Script executed directly\n app.run() # Launch built-in web server and run this Flask 
webapp\n","sub_path":"api/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"548457306","text":"import tests.asa_object_test as asa\n\nOBJECT_NAME = \"NOBJ\"\nHOST = \"8.8.8.8\"\nNETWORK = \"192.168.12.0/24\"\nLIST = [\"192.168.0.1\", \"192.168.0.100\"]\nFQDN = \"host.example.local\"\n\n# ASA section\nasa.named_object_test(OBJECT_NAME)\nasa.addressed_object_test(HOST, NETWORK, FQDN, LIST)\nasa.unknown_addressed_object_test()\n","sub_path":"tests/main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"78660339","text":"from flask import Flask, render_template, request\nimport pickle\nimport numpy as np\nimport datetime\nimport random\nfrom datetime import date\nimport pandas as pd\n\napp=Flask(__name__)\n\n@app.route('/')\ndef home():\n return render_template(\"test.html\")\n\ndef ValuePredictor(to_predict_list):\n to_predict = np.array(to_predict_list).reshape(1,8)\n to_predict1 = loaded_model1.transform(to_predict)\n result = loaded_model.predict(to_predict1)\n return result[0]\n\n@app.route('/aicalculatorresult',methods = ['POST'])\ndef aicalculatorresult():\n prediction=''\n if request.method == 'POST':\n to_predict_list = request.form.to_dict()\n \n\n\n \n \n df=pd.read_excel('/Users/lota/Downloads/Demo自動配對.xlsx')\n df = df.drop(df.columns[[ 7,8,9,10,11,12,13,14,15,16,17,18,19 ]], axis=1)\n df=df.loc[df['失效日期'].dt.date>=date.today()]\n df.派單上限=df.派單上限.fillna(10)\n df.預算上限=df.預算上限.fillna(20)\n df['超標']=df['派單上限']-df['9月派單']\n df=df.where(df['超標']>0)\n df=df.dropna()\n df3=df\n df = df.drop(df.columns[[0,8]], axis=1)\n df=pd.get_dummies(df,columns=['預算上限','風格'])\n pp=0\n for i in df['預算上限_40']:\n if i==1:\n df['預算上限_20'][df.index[pp]]=df['預算上限_20'][df.index[pp]]+1\n pp+=1\n pp=0\n for i in df['預算上限_50']:\n if i==1:\n df['預算上限_20'][df.index[pp]]=df['預算上限_20'][df.index[pp]]+1\n df['預算上限_40'][df.index[pp]]=df['預算上限_40'][df.index[pp]]+1\n pp+=1\n customer=pd.DataFrame([[str(to_predict_list['z1']),str(to_predict_list['z2']),str(to_predict_list['z3']),str(to_predict_list['z4'])]], columns=['名字', '預算上限', '風格','裝修/設計'],index=[1000])\n customer=customer.drop(customer.columns[[0]], axis=1)\n zz=pd.get_dummies(customer,columns=['預算上限','風格'])\n df=df.append(zz)\n df=df.drop(df.columns[[2,3]], axis=1)\n df=df.fillna(0)\n num=df.index[-1]\n if df['預算上限_40'][num]==1:\n df['預算上限_20'][num]=df['預算上限_20'][num]+1\n if df.iloc[df.shape[0]-1,:]['預算上限_50']==1:\n df['預算上限_20'][num]=df['預算上限_20'][num]+1\n df['預算上限_40'][num]=df['預算上限_40'][num]+1\n df=df.where(df['裝修/設計']==df['裝修/設計'][1000])\n df=df.dropna()\n df3=df\n df = df.drop(df.columns[[1,2]], axis=1)\n cha=['預算上限_20', '預算上限_40', '預算上限_50', '預算上限_50-100', '風格_古典風',\n '風格_現代風']\n cc=1\n ca=0\n big=[]\n finalrow=df.shape[0]-1\n for e in range(6):\n z=0\n ind=[]\n if df.iloc[finalrow,:][cc]==1:\n for i in df.iloc[0:finalrow,:][cha[ca]]:\n if i==df.iloc[finalrow,:][cc]:\n ind.append(z)\n \n z+=1\n big.append(ind)\n cc+=1\n ca+=1\n import itertools\n v=set.intersection(set(big[0]),*itertools.islice(big,1,None))\n cnn=1\n for i in list(v):\n if cnn!=4:\n \n cnn+=1\n else:\n break\n df3['neworder']=list(range(df3.shape[0]))\n df3=df3.set_index('neworder')\n cq=df3['評分'].to_dict()\n ra={}\n for i in list(v):\n q={i:cq[i]}\n ra.update(q)\n finallist={k: v for k, v in sorted(ra.items(), key=lambda item: 
item[1],reverse=True)}\n newv=list(finallist.keys())[0:3]\n cnn=1\n com=[]\n for i in newv:\n if cnn!=4:\n com.append(df.iloc[i,:][0])\n cnn+=1\n else:\n break\n try:\n c1=com[0]\n except:\n c1='沒有匹配'\n try:\n c2=com[1]\n except:\n c2='沒有匹配'\n try:\n c3=com[2]\n except:\n c3='沒有匹配'\n \n from keras.preprocessing import image\n\n a='YBJ'\n image_path=\"/Users/lota/Downloads/c21/static/{}.jpg\".format(a)\n img = image.load_img(image_path)\n\n return render_template(\"aicalculatorresult.html\",com=com,c1=c1,c2=c2,c3=c3,img=img)\n\n@app.route(\"/aicalculator\", methods=['POST','GET'])\ndef aicalculator():\n return render_template('aicalculator.html')\n\n@app.route(\"/test\", methods=['POST','GET'])\ndef test():\n\n return render_template('test.html')\n@app.route(\"/result\", methods=['POST','GET'])\ndef result():\n if request.method == 'POST':\n to_predict_list = request.form.to_dict()\n name=to_predict_list['userame']\n place=to_predict_list['place']\n budget=to_predict_list['budget']\n consider=to_predict_list['consider']\n favour=to_predict_list['favour']\n ft=str(to_predict_list['userame'])+'正在'+str(to_predict_list['place'])+'尋找一所樓盤,樓盤中的'+str(to_predict_list['consider'])+'對他來說非常重要。'\n if str(to_predict_list['place'])=='九龍':\n \n g2=random.choice([2,3]) \n if g2==2:\n a1='彩明苑'\n else:\n a1='富澤花園'\n g3=random.choice([5,6]) \n if g3==5:\n a2='新峰花園'\n else:\n a2='麗港城'\n g4=random.choice([8,9]) \n if g4==8:\n a3='帝景臺'\n else:\n a3='康柏苑'\n elif str(to_predict_list['place'])=='新界':\n \n g2=random.choice([11,12])\n if g2==11:\n a1='大興花園'\n else:\n a1='沙田第一城' \n g3=random.choice([14,15])\n if g3==14:\n a2='荃灣中心'\n else:\n a2='瓊華樓' \n g4=random.choice([17,18]) \n if g4==17:\n a3='栢慧豪園'\n else:\n a3='翠濤閣'\n elif str(to_predict_list['place'])=='香港島':\n \n g2=random.choice([4,7])\n if g2==4:\n a1='白居二'\n else:\n a1='康山花園' \n g3=random.choice([10,13]) \n if g3==10:\n a2='年豐大廈'\n else:\n a2='民新大廈' \n g4=random.choice([16,19]) \n if g4==16:\n a3='南里壹號'\n else:\n a3='丹拿花園'\n \n \n return render_template('result.html',name=name,place=place,budget=budget,consider=consider,favour=favour,ft=ft,g2=g2,g3=g3,g4=g4,a2=a2,a3=a3,a1=a1)\n\n@app.route('/',methods = ['POST'])\ndef home1():\n if request.method == 'POST':\n return render_template(\"home.html\")\n@app.route('/ccc',methods = ['POST','GET'])\ndef ccc():\n\n return render_template(\"ccc.html\")\n@app.route('/cccresult',methods = ['POST','GET'])\ndef cccresult():\n\n return render_template(\"cccresult.html\")\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"ec.py","file_name":"ec.py","file_ext":"py","file_size_in_byte":7308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"419777129","text":"#https://docs.opencv.org/3.2.0/d7/d8b/tutorial_py_lucas_kanade.html\nimport numpy as np\nimport cv2\nfrom skvideo.io import FFmpegWriter\n\n# params for ShiTomasi corner detection\nfeature_params = dict( maxCorners = 100,\n qualityLevel = 0.3,\n minDistance = 7,\n blockSize = 7 )\n# Parameters for lucas kanade optical flow\nlk_params = dict( winSize = (15,15),\n maxLevel = 2,\n criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n# Create some random colors\ncolor = np.random.randint(0,255,(100,3))\n\n\n\n\n\nactions = ['fight', 'handclapping', 'handshake', 'hug', 'jog', 'jump',\\\n 'punch', 'push', 'skip', 'walk', 'wave1', 'wave2']\nn_classes = len(actions)\ntrainfiles = list(range(1,16)) + list(range(20,29)) + list(range(33,41)) + list(range(43,51))\ntestfiles = [16, 17, 18, 
19, 29, 30, 31, 32, 41, 42]\n\nfile_iterator = 0\ncurr_file = 0\n\nfor act in actions:\n for video in range(50):\n path = 'Data/InfAR_Dataset_1.0/' + act + '/' +\\\n '%06d' % (video+1) + '_' + act + '.avi'\n print(path)\n\n cap = cv2.VideoCapture(path)\n totalFrames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n # Take first frame and find corners in it\n ret, old_frame = cap.read()\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)\n # Create a mask image for drawing purposes\n mask = np.zeros_like(old_frame)\n\n height , width , layers = old_frame.shape\n new_path = 'Data/InfAR_Dataset_LK/' + act + '/' +\\\n '%06d' % (video+1) + '_' + act + '.avi'\n new_video = FFmpegWriter(new_path)#, frameSize=(width,height))\n\n for i in range(totalFrames-1):\n\n ret,frame = cap.read()\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # calculate optical flow\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\n try:\n # Select good points\n good_new = p1[st==1]\n good_old = p0[st==1]\n # draw the tracks\n for i,(new,old) in enumerate(zip(good_new,good_old)):\n a,b = new.ravel()\n c,d = old.ravel()\n mask = cv2.line(mask, (a,b),(c,d), color[i].tolist(), 2)\n frame = cv2.circle(frame,(a,b),5,color[i].tolist(),-1)\n img = cv2.add(frame,mask)\n new_video.writeFrame(img)\n except:\n print(\"oups\")\n\n\n #cv2.imshow('frame',img)\n #k = cv2.waitKey(30) & 0xff\n #if k == 27:\n # break\n # Now update the previous frame and previous points\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1,1,2)\n\n cap.release()\n new_video.close()\ncv2.destroyAllWindows()","sub_path":"Suplemental Code/OpticalFlow/transformAllVideosToLKOF.py","file_name":"transformAllVideosToLKOF.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"63717782","text":"from benefit import *\nfrom surrender import *\nfrom mortality import *\nfrom construction_of_the_tree import *\nfrom backward_traversal_of_the_tree_to_get_P_0 import *\nfrom optimal_tree_builder import *\nimport scipy.optimize as opti\nfrom math import sqrt, factorial\n\nclass Contract_Value:\n \n def __init__(\n self,\n nb_year = 5,\n nb_of_trading_date = 6,\n risk_free_rate = 0.06,\n volatility = 0.25,\n C = [0,1], \n beta = 1,\n g = 0.03,\n qx_rate = Qx.us_x0_55,\n benefit_function = Benefit.point_to_point_w_global_protection_of_premiums,\n surrender_value_function = Surrender.no_surrender,\n numerical_precision = 1e-5\n \n ):\n self.nb_year = nb_year\n self.nb_of_trading_date = nb_of_trading_date\n self.risk_free_rate = risk_free_rate \n self.volatility = volatility\n self.C = C\n self.beta = beta\n self.g = g\n self.qx_rate = qx_rate\n self.benefit_function = benefit_function\n self.surrender_value_function = surrender_value_function\n self.numerical_precision = numerical_precision\n \n self.n = self.nb_year\n self.qx = qx_rate\n self.r = risk_free_rate\n \n self.W, self.W_prob = self.get_W_and_W_prob()\n \n def value(self, alpha, build_optimal_tree = False):\n benefits = Benefit(alpha, self.beta, self.g, self.benefit_function)\n surrender = Surrender(self.n, self.surrender_value_function)\n \n builder = Construction_Of_The_Tree(self.n, benefits, surrender, self.W, self.C, self.W_prob, self.qx)\n tree = builder.get_tree()\n \n traverser = Backward_Traversal_Of_The_Tree_To_Get_P_0(tree, self.n, self.r)\n P_0 = traverser.get_P_0()\n \n if build_optimal_tree :\n 
self.optimal_tree = Optimal_Tree(tree, self)\n \n return P_0\n \n def optimal_alpha(self, build_optimal_tree = False):\n optiResult = opti.brentq(\n self.value,\n 0,\n 1,\n xtol = self.numerical_precision,\n rtol = self.numerical_precision,\n full_output = True\n )\n \n optimal_alpha = optiResult[0]\n \n self.optimal_alpha = optimal_alpha\n \n self.value(optimal_alpha, build_optimal_tree)\n \n return optimal_alpha\n \n def get_W_and_W_prob(self):\n N = self.nb_of_trading_date\n v = self.volatility\n r = self.r\n \n u = exp( v / sqrt(N) )\n d = 1 / u\n \n p = ( exp( r / N ) - d ) / ( u - d )\n q = 1 - p \n \n \n W = []\n W_prob = []\n \n for i in range(N+1):\n move = u ** (N - i) * d ** i\n\n coef = self.nCr( N , i)\n prob = coef * p ** (N - i) * q ** i\n\n W.append(move) \n W_prob.append(prob)\n \n return W, W_prob\n \n def nCr(self, n, i) : return factorial(n) / factorial(i) / factorial(n - i)\n ","sub_path":"contract_value.py","file_name":"contract_value.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"274937943","text":"import sys\r\nimport random\r\nfrom PyQt5.QtCore import *\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtWidgets import *\r\nimport PyQt5\r\n\r\n\r\n\r\n\r\nif hasattr(PyQt5.QtCore.Qt, 'AA_EnableHighDpiScaling'):\r\n PyQt5.QtWidgets.QApplication.setAttribute(PyQt5.QtCore.Qt.AA_EnableHighDpiScaling, True)\r\n\r\nif hasattr(PyQt5.QtCore.Qt, 'AA_UseHighDpiPixmaps'):\r\n PyQt5.QtWidgets.QApplication.setAttribute(PyQt5.QtCore.Qt.AA_UseHighDpiPixmaps, True)\r\n\r\n\r\nclass bar(QWidget):\r\n\tdef __init__(self, numLabels, parent=None):\r\n\t\tsuper(bar, self).__init__(parent)\r\n\t\tself.layoutUsed = QVBoxLayout()\r\n\t\tself.setLayout(self.layoutUsed)\r\n\t\tself.layoutUsed.addWidget(QLabel(\"Hi Bob\"))\r\n\t\tfor i in range(numLabels):\r\n\t\t\tnewLabel = QLabel(\"Wee: {}/{}\".format(i, numLabels))\r\n\t\t\tself.layoutUsed.addWidget(newLabel)\r\n\t\t# self.show()\r\n\t\treturn\r\n\r\nclass Foo(QDialog):\r\n\tdef __init__(self, parent=None):\r\n\t\tsuper(Foo, self).__init__(parent)\r\n\t\tself.outerLayout = QVBoxLayout()\r\n\r\n\r\n\t\tself.setLayout(self.outerLayout)\r\n\r\n\r\n\t\tself.setWindowTitle('Dummy Gui')\r\n\r\n\t\tself.tabs = QTabWidget()\r\n\t\tself.outerLayout.addWidget(self.tabs)\r\n\r\n\t\tself.tabs.addTab(bar(5), \"hi\")\r\n\t\tself.tabs.addTab(bar(1), \"Bob\")\r\n\r\n\r\n\t\t# self.testBar = bar(5)\r\n\t\t# self.outerLayout.addWidget(self.testBar)\r\n\t\t# self.testBar = bar(5)\r\n\t\t# self.outerLayout.addWidget(self.testBar)\r\n\r\n\r\n\t\tself.outerLayout.addWidget(QLabel(\"Series Local Name\"))\r\n\t\treturn\r\n\r\n\r\nsys._excepthook = sys.excepthook\r\n\r\ndef my_exception_hook(exctype, value, traceback):\r\n # Print the error and traceback\r\n print(exctype, value, traceback)\r\n # Call the normal Exception hook after\r\n sys._excepthook(exctype, value, traceback)\r\n sys.exit(0)\r\n\r\n# Set the exception hook to our wrapping function\r\nsys.excepthook = my_exception_hook\r\n\r\napp = QApplication(sys.argv)\r\ngui = Foo()\r\ngui.show()\r\napp.exec_()","sub_path":"labInterface/GuiTest.py","file_name":"GuiTest.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"203043051","text":"from sqlite import creteTable, queryJson\n\nimport http.server\n\nclass HTTPHandler(http.server.BaseHTTPRequestHandler):\n\n def do_GET(self):\n if 'api?stations' in self.path:\n 
jsonData = queryJson()\n\n self.send_response(200)\n self.send_header(\"Access-Control-Allow-Origin\", \"*\")\n self.send_header('Content-type', 'text/json')\n self.end_headers()\n self.wfile.write(jsonData.encode(encoding='utf_8'));\n \n return\n \ndef init():\n # configure httpd parameters\n server_addr = ('localhost', 8000)\n \n # instantiate a server object\n httpd = http.server.HTTPServer(server_addr, HTTPHandler)\n \n # start serving pages\n print(\"Server started...\")\n httpd.serve_forever ()\n \nif __name__ == \"__main__\":\n creteTable()\n init()\n","sub_path":"Prototype/Server/UrbsDB/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"203326194","text":"def binary_search(arr, start, end, target):\n while start <= end:\n mid = (start + end) // 2\n if arr[mid] < target:\n start = mid + 1\n elif arr[mid] > target:\n end = mid - 1\n else:\n return mid\n return -1\n\n\n# arr = list(range(10))\n# start = 0\n# end = len(arr) - 1\n# target = 10\n# print(binary_search(arr,start, end, target))\n\n\ndef binary_search_rot(arr):\n lo = 0\n hi = len(arr) - 1\n # // find the index of the smallest value using binary search.\n # // Loop will terminate since mid < hi, and lo or hi will shrink by at least 1.\n # // Proof by contradiction that mid < hi: if mid==hi, then lo==hi and loop would have been terminated.\n while lo < hi:\n print(arr[lo: hi+1])\n mid = int((lo+hi) / 2)\n\n if arr[mid] > arr[hi]:\n lo = mid+1\n else:\n hi = mid\n \n # lo==hi is the index of the smallest value and also the number of places rotated.\n return arr[lo]\n\n\nprint(binary_search_rot([6,7,1,2,3,4]))","sub_path":"solutions/bi-search.py","file_name":"bi-search.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"611839008","text":"import sys\nimport os\nsys.path.append(os.getcwd())\n\nimport random\nimport numpy as np\nimport pandas as pd\nimport glob\n\nfrom functools import partial\nfrom tqdm import tqdm\n\nfrom keras.preprocessing import image\nfrom sklearn.model_selection import train_test_split\nfrom keras.utils import to_categorical\n\nfrom src import config\nfrom src.utils import specAiHelper\nfrom src.utils import databaseHelper\nfrom src.utils import visHelper\n\ntqdm = partial(tqdm, position=0, leave=True)\ndbTurtle = databaseHelper.Turtle(filename=config.SpecAI.dbFile)\n\n\nclass Mouse:\n def __init__(self, input_shape, epoch, dataSize, modelID, trainCSV, testCSV, trainFolder, testFolder):\n \"\"\"\n :param input_shape: shape of image\n :param epoch: runs the model is trained for\n :param dataSize: how many imgs are used to train\n :param modelID: the model structure ID;\n :param trainCSV: CSV file path\n :param testCSV: CSV file path\n :param trainFolder: Training imgs folder\n :param testFolder: Testing imgs folder\n \"\"\"\n self.input_shape = input_shape\n self.epoch = epoch\n self.dataSize = dataSize\n self.modelID = modelID\n\n self.trainCSV = trainCSV\n self.testCSV = testCSV\n self.trainFolder = trainFolder\n self.testFolder = testFolder\n self.aiTurtle = specAiHelper.Turtle()\n\n def preprocess(self):\n \"\"\"\n to split and clean up the data.\n :return: tuple of data for training the model\n \"\"\"\n train_image = []\n train = pd.read_csv(self.trainCSV)\n for i in tqdm(range(train.shape[0])):\n img = image.load_img(f\"{self.trainFolder}\\\\{train['filenames'][i]}\",\n 
target_size=(self.input_shape[0], self.input_shape[1], self.input_shape[2]),\n color_mode=\"rgb\" if self.input_shape[2] == 3 else \"grayscale\")\n img = image.img_to_array(img)\n img = img / 255\n train_image.append(img)\n\n X = np.array(train_image)\n y = train['emotion'].values\n y = to_categorical(y)\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.2)\n\n return X_train, X_test, y_train, y_test\n\n def train(self, data):\n \"\"\"\n training model\n :param data: Data from the preprocess function\n :return: trained model\n \"\"\"\n model = self.aiTurtle.getModel(modelID=self.modelID, input_shape=self.input_shape)\n model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])\n\n # X_train, y_train X_test, , y_test)\n model.fit(data[0], data[2], epochs=self.epoch, validation_data=(data[1], data[3]))\n return model\n\n def evaluate(self):\n \"\"\"\n :return: accuracy score\n \"\"\"\n\n # getting predictions\n test_image = []\n testArray = pd.read_csv(self.testCSV)\n\n for i in tqdm(range(testArray.shape[0])):\n img = image.load_img(f\"{self.testFolder}\\\\{testArray['filenames'][i]}\",\n target_size=self.input_shape,\n color_mode=\"rgb\" if self.input_shape[2] == 3 else \"grayscale\")\n img = image.img_to_array(img)\n img = img / 255\n test_image.append(img)\n\n testArray = np.array(test_image)\n prediction = np.argmax(model.predict(testArray), axis=-1)\n\n # checking answers\n answers = []\n testData = pd.read_csv(self.testCSV)\n for i in testData['filenames']:\n answers.append(dbTurtle.findQuality(i.replace('.png', '.wav'), 'Emotion_ID'))\n points = 0\n for i in range(len(prediction)):\n if prediction[i] == answers[i]:\n points += 1\n\n return int(100 * (points / len(prediction)))\n\n\nif __name__ == \"__main__\":\n\n visTurtle = visHelper.Turtle()\n\n # reset Log file\n print(\"Resting log file\")\n with open(config.SpecAI.logFile, \"w\") as f:\n f.write(\"\")\n f.close()\n\n print(\"Starting training process\")\n for size in config.SpecAI.DATA_SIZE:\n # Generating data for training\n files = glob.glob(config.SpecAI.audioFolder + \"\\\\*\\\\*\\\\*.wav\")\n random.shuffle(files)\n if size is not None:\n if size > len(files):\n print(\"Size too big, loading all available files\")\n else:\n files = files[-size:]\n print(f\"Loading last {size} files and shuffling\")\n else:\n print(\"Loading all files and shuffling\")\n\n pbar = tqdm(files)\n for i in pbar:\n pbar.set_description(f\"Processing {i[-24:]}\")\n # slicing filename\n fname = config.SpecAI.specFolder + \"\\\\\" + i[-24:][:20] + \".png\"\n visTurtle.makeSpec(file=i, fname=fname)\n\n dbTurtle.prepareData(FolderFiles={config.SpecAI.testFolder: config.SpecAI.testCSV,\n config.SpecAI.trainFolder: config.SpecAI.trainCSV},\n TargetFolder=config.SpecAI.specFolder)\n\n for epoch in config.SpecAI.EPOCHS:\n for modelID in config.SpecAI.MODEL_STRUCTURE_ID:\n for input_shape in config.SpecAI.INPUT_SHAPES:\n\n \"\"\" Driver Code\"\"\"\n\n specAI = Mouse(input_shape=input_shape,\n epoch=epoch,\n dataSize=size,\n modelID=modelID,\n\n trainCSV=config.SpecAI.trainCSV,\n testCSV=config.SpecAI.testCSV,\n trainFolder=config.SpecAI.trainFolder,\n testFolder=config.SpecAI.testFolder\n )\n\n # preprocessing data\n data = specAI.preprocess()\n\n # training model\n model = specAI.train(data)\n\n # evaluating\n score = 0\n for i in range(5):\n score += specAI.evaluate()\n score = score / 5\n\n \"\"\" Logging \"\"\"\n with open(config.SpecAI.logFile, \"a\") as f:\n statement = 
f\"####################################################### \\n\" \\\n f\"Model : {modelID} Shape : {input_shape} Epochs : {epoch} Data Volume : {size}\\n\" \\\n f\"Average Accuracy : {score}%\" \\\n f\"\\n\"\n f.write(statement)\n f.close()\n\n \"\"\" Saving model \"\"\"\n # [size]_ [epochs]_ [modelID]_ [input_shape].h5\n modelName = f\"{str(size)}_{str(epoch)}_{str(modelID)}_{str(input_shape).replace(' ', '')}.h5\"\n model.save(f\"{config.SpecAI.SpecModelsFolder}\\\\{modelName}\")\n","sub_path":"src/AI/specAI.py","file_name":"specAI.py","file_ext":"py","file_size_in_byte":7180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"20127416","text":"#$Revision: 1.1 $\nimport FWCore.ParameterSet.Config as cms\nfrom FWCore.ParameterSet.VarParsing import VarParsing\noptions = VarParsing ('python')\n\noptions.register ('filepath',\n\t\tFalse,\n\t\tVarParsing.multiplicity.singleton,\n\t\tVarParsing.varType.string,\n\t\t\"Filepath input\")\noptions.parseArguments()\nfilepath = options.filepath\n\nprocess = cms.Process(\"TheNtupleMaker\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\n# See TheNtupleMaker twiki for a brief explanation\n#process.MessageLogger.destinations = cms.untracked.vstring(\"cerr\")\n#process.MessageLogger.cerr.FwkReport.reportEvery = 10\n#process.MessageLogger.cerr.default.limit = 5\n\n# This is required in order to configure HLTConfigProducer\nprocess.load(\"L1TriggerConfig.L1GtConfigProducers.L1GtConfig_cff\")\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\n#process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10) )\n\nprocess.source = cms.Source(\"PoolSource\",\n\t\t\t\t\t\t\tfileNames =\n\t\t\t\t\t\t\tcms.untracked.vstring( \n\t\t\t\t\t\t\t\t#'file:'+filepath,\n\t\t\t\t\t\t\t\t#filepath,\n\t\t\t\t\t\t\t\t#\"file:VBFC1pmN2_C1ToTau_N2ToTauTau_LSP050_Stau195_Chargino200_qcd0_qed4_unwgt_MINIAODSIM_4.root\"\n\t\t\t\t\t\t\t\t#'/store/mc/Phys14DR/QCD_Pt-80to120_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_trkalmb_castor_PHYS14_25_V1-v2/00000/0629F4F9-E37C-E411-A6F3-002590D0AFF6.root',\n\t\t\t\t\t\t\t\t\t\t\t\t )\n\t\t\t\t\t\t\t)\nFILE = open(filepath)\nlines = FILE.readlines()\nFILE.close()\nfilelist = []\nfor line in lines:\n\tline = line.strip()\n\tif len(line) == 0:\n\t\tcontinue\n\tfilelist.append(\"file:\" + line)\nprocess.source.fileNames = filelist\n\nprocess.load(\"ntuples.VBF-LS-tau-ntupler.ntuple_cfi\")\n\nprocess.p = cms.Path(process.demo)\n","sub_path":"VBF-LS-tau-ntupler_cfg_globaldataset.py","file_name":"VBF-LS-tau-ntupler_cfg_globaldataset.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"530980389","text":"# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution(object):\n def reverseBetween(self, head, m, n):\n \"\"\"\n :type head: ListNode\n :type m: int\n :type n: int\n :rtype: ListNode\n \"\"\"\n # Op1: Cheating\n #arr = self.listToArray(head)\n #arr[m - 1: n] = reversed(arr[m - 1: n])\n #return self.arrToList(arr)\n\n # Op2: In-place and in one-pass\n #if m == 1:\n # curr = head\n # prev = None\n # for _ in range(n):\n # curr.next, prev, curr = prev, curr, curr.next\n # head.next = curr\n # return prev\n #else:\n # curr = head\n # for _ in range(m - 2):\n # curr = curr.next\n # splice1 = curr\n # curr = curr.next\n # splice2 = curr\n # prev = None\n # for _ in range(n - m 
+ 1):\n # curr.next, prev, curr = prev, curr, curr.next\n # splice1.next = prev\n # splice2.next = curr\n # return head\n\n # Op3: Dummy head\n dummy = ListNode(None)\n dummy.next = head\n\n pre = dummy\n for _ in range(m - 1):\n pre = pre.next\n\n start = pre.next\n then = start.next\n\n for _ in range(n - m):\n start.next = then.next\n then.next = pre.next\n pre.next = then\n then = start.next\n return dummy.next\n\n def arrToList(self, arr):\n prev = None\n for x in reversed(arr):\n head = ListNode(x)\n head.next = prev\n prev = head\n return head\n\n def listToArray(self, head):\n arr = []\n while head:\n arr.append(head.val)\n head = head.next\n return arr\n\n\ndef print_list(node):\n while node is not None:\n print(node.val, end=\" \")\n node = node.next\n print()\n\n\ndef main():\n test = Solution()\n arr = [1, 2, 3, 4, 5]\n head = test.arrToList(arr)\n result = test.reverseBetween(head, 2, 4)\n print_list(result)\n\nmain()\n","sub_path":"python/92 Reverse Linked List II.py","file_name":"92 Reverse Linked List II.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"519307303","text":"from django.conf import settings\nfrom urlparse import urlparse\n\n\ndef resource_urls(request):\n \"\"\"Global values to pass to templates\"\"\"\n url_parsed = urlparse(settings.SOLR_URL)\n defaults = dict(\n SOLR_URL=settings.SOLR_URL,\n SOLR_IP='%s://%s:%s' % (url_parsed.scheme, url_parsed.hostname, url_parsed.port)\n )\n return defaults\n","sub_path":"hypermap/hypermap/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"239974510","text":"def solution(n, arr1, arr2):\n answer = []\n for i in range(n):\n answer.append(arr1[i] | arr2[i])\n\n ans = []\n for i in range(n):\n temp = bin(answer[i])[2:]\n if len(temp) < n:\n while len(temp) < n:\n temp = '0' + temp\n t = ''\n for i in temp:\n if i == '1':\n t += '#'\n else:\n t += ' '\n ans.append(t)\n return ans","sub_path":"Programmers/2018 KAKAO/비밀지도.py","file_name":"비밀지도.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"349142705","text":"import collections\nfrom typing import Optional, Collection\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom utils import SharedStorage, MetricTracker\n\n\nclass LossImplementationError(RuntimeError):\n \"\"\"Loss function (or class) must output a single Tensor or dictionary\n or Tensors including the key `loss`\n \"\"\"\n\n\ndef normalize(x: torch.Tensor, dim=1):\n x = x / (torch.norm(x, 2, dim=dim, keepdim=True).expand_as(x) + 1e-12)\n return x\n\n\ndef nll_loss(output, target):\n return F.nll_loss(output, target)\n\n\ndef cross_entropy(output, target):\n return F.cross_entropy(output, target)\n\n\ndef cross_entropy_smooth(output, target, epsilon):\n output = output.log_softmax(dim=-1)\n with torch.no_grad():\n true_dist = torch.zeros_like(output)\n true_dist.scatter_(1, target.data.unsqueeze(1), 1 - epsilon)\n true_dist += epsilon / output.size(-1)\n return torch.mean(torch.sum(-true_dist * output, dim=-1))\n\n\nclass TrackingLoss(nn.Module):\n \"\"\"Loss that uses `tracker` and `storage` to interactively record auxiliary metrics or share data\n with other modules in the training pipeline.\n \"\"\"\n\n def forward(self, items: 
collections.Mapping, tracker: MetricTracker, storage: SharedStorage):\n raise NotImplementedError()\n\n\nclass CrossEntropyLoss(TrackingLoss):\n __name__ = \"cross_entropy\"\n\n def __init__(self, output_key=\"preds\", target_key=\"targets\", label_smoothing=0):\n \"\"\"\n :param output_key:\n :param target_key:\n :param label_smoothing: Epsilon value for label smoothing or 0\n \"\"\"\n super().__init__()\n self.output_key = output_key\n self.target_key = target_key\n self.label_smoothing = label_smoothing\n\n def forward(self, items: collections.Mapping, tracker: MetricTracker, storage: SharedStorage):\n output, target = items[self.output_key], items[self.target_key]\n return cross_entropy_smooth(output, target, self.label_smoothing)\n\n\nclass TripletLoss(TrackingLoss):\n \"\"\"\n Note, assumes that each identity in a given batch has the same number of instances\n \"\"\"\n __name__ = \"triplet_loss\"\n\n def __init__(self, margin: float = None, output_key=\"features\", target_key=\"targets\", track_distances=True):\n super().__init__()\n self.margin = margin\n if margin:\n self.ranking_loss = nn.MarginRankingLoss(margin)\n else:\n self.ranking_loss = nn.SoftMarginLoss()\n self.output_key = output_key\n self.target_key = target_key\n self.track_distances = track_distances\n\n def forward(self, items: collections.Mapping, tracker: MetricTracker, storage: SharedStorage):\n features, target = items[self.output_key], items[self.target_key]\n n = len(features)\n\n batch_size = features.size(0)\n same_target = (target.eq(target.view(batch_size, 1)))\n norms = features.square().sum(dim=1, keepdim=True).expand(batch_size, batch_size)\n distmat = norms + norms.t() # a^2 + b^2\n distmat.addmm_(beta=1, alpha=-2, mat1=features, mat2=features.T) # a^2 + b^2 - 2ab\n distmat = distmat.clamp(min=1e-12).sqrt() # euclid\n\n pos_dists = distmat[same_target & ~torch.diagflat(torch.ones(n, dtype=torch.bool, device=distmat.device))]\n neg_dists = distmat[~same_target]\n hard_pos_dists, _ = distmat[same_target].view(batch_size, -1).max(dim=1)\n hard_neg_dists, _ = neg_dists.view(batch_size, -1).min(dim=1)\n if self.track_distances:\n pos_mean = hard_pos_dists.mean()\n neg_mean = hard_neg_dists.mean()\n tracker.append_histogram(\"batch_hard_dist_ap\", hard_pos_dists)\n tracker.append_histogram(\"batch_hard_dist_an\", hard_neg_dists)\n tracker.append_histogram(\"batch_dist_pos\", pos_dists)\n tracker.append_histogram(\"batch_dist_neg\", neg_dists)\n tracker.append_histogram(\"batch_hard_delta\", hard_pos_dists - hard_neg_dists)\n tracker.update(\"batch_hard_dist_ap_mean\", pos_mean, n=n)\n tracker.update(\"batch_hard_dist_an_mean\", neg_mean, n=n)\n tracker.update(\"batch_hard_dist_ap_mean\", pos_dists.mean().item(), n=n)\n tracker.update(\"batch_hard_dist_an_mean\", neg_dists.mean().item(), n=n)\n tracker.update(\"batch_hard_delta_mean\", pos_mean - neg_mean, n=n)\n\n if self.margin:\n loss = self.ranking_loss(hard_neg_dists, hard_pos_dists,\n torch.ones(batch_size, device=features.device, dtype=features.dtype))\n else:\n loss = self.ranking_loss(hard_neg_dists - hard_pos_dists,\n torch.ones(batch_size, device=features.device, dtype=features.dtype))\n return loss\n\n\nclass CenterLoss(nn.Module):\n \"\"\"Center loss.\n Code adapted from https://github.com/michuanhaohao/reid-strong-baseline\n\n Wen et al., 2016. 
A Discriminative Feature Learning Approach for Deep Face Recognition.\n \"\"\"\n __name__ = \"center_loss\"\n\n def __init__(self, num_classes, dim=2048, output_key=\"features\", target_key=\"targets\", device=\"cpu\"):\n super(CenterLoss, self).__init__()\n self.num_classes = num_classes\n self.dim = dim\n self.device = device\n self.output_key = output_key\n self.target_key = target_key\n self.centers = nn.Parameter(torch.randn(self.num_classes, self.dim, device=self.device))\n\n def forward(self, items: collections.Mapping, tracker: MetricTracker, storage: SharedStorage):\n \"\"\"\n :param x: feature matrix with shape (batch_size, feat_dim).\n :param labels: ground truth labels with shape (num_classes).\n :return:\n \"\"\"\n features, labels = items[self.output_key], items[self.target_key]\n if features.size(0) != labels.size(0):\n raise ValueError(\"Batch size dimensions do not match\")\n\n batch_size = features.size(0)\n distmat = torch.pow(features, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \\\n torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()\n distmat.addmm_(features, self.centers.t(), beta=1, alpha=-2)\n\n classes = torch.arange(self.num_classes, device=self.device).long()\n labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)\n mask = labels.eq(classes.expand(batch_size, self.num_classes))\n\n dist = distmat * mask.float()\n loss = dist.clamp(min=1e-12, max=1e+12).sum() / batch_size\n # dist = []\n # for i in range(batch_size):\n # value = distmat[i][mask[i]]\n # value = value.clamp(min=1e-12, max=1e+12) # for numerical stability\n # dist.append(value)\n # dist = torch.cat(dist)\n # loss = dist.mean()\n return loss\n\n\ndef test_center_loss():\n center_loss = CenterLoss(6, dim=2048)\n features = torch.rand(16, 2048)\n targets = torch.tensor([0, 1, 2, 3, 2, 3, 1, 4, 5, 3, 2, 1, 0, 0, 5, 4]).long()\n items = {\n \"features\": features,\n \"targets\": targets\n }\n loss = center_loss(items)\n print(loss)\n\n\nclass MixedLoss(TrackingLoss):\n \"\"\"Mixes various losses and records individual losses to metric tracker\n \"\"\"\n\n def __init__(self, *losses: TrackingLoss, weights: Optional[Collection[float]] = None, items_len_key=\"targets\"):\n super().__init__()\n if weights is not None and len(losses) != len(weights):\n raise ValueError(\"Number of weights must match number of losses\")\n\n self.losses = losses\n self.items_len_key = items_len_key\n self.weights = weights\n\n def forward(self, items: collections.Mapping, tracker: MetricTracker, storage: SharedStorage):\n losses = []\n for loss_cls in self.losses:\n loss = loss_cls(items, tracker, storage)\n losses.append(loss)\n if self.items_len_key:\n n = len(items[self.items_len_key])\n else:\n n = 1\n tracker.update(loss_cls.__name__, loss.item(), n=n)\n\n if self.weights:\n total = 0\n for loss, weight in zip(losses, self.weights):\n total += loss * weight\n return total\n else:\n return sum(losses)\n\n\nclass CrossEntropyTripletLoss(MixedLoss):\n def __init__(self, margin: float = None, items_len_key=\"targets\", track_distances=True, epsilon=0):\n losses = list()\n losses.append(CrossEntropyLoss(label_smoothing=epsilon))\n losses.append(TripletLoss(margin=margin, track_distances=track_distances))\n super().__init__(*losses, items_len_key=items_len_key)\n\n\nclass CrossEntropyTripletCenterLoss(MixedLoss):\n def __init__(self, num_classes: float, margin: float = None, dim=2048, items_len_key=\"targets\",\n track_distances=True, epsilon=0, 
beta=0.0005, device=\"cpu\"):\n losses = list()\n losses.append(CrossEntropyLoss(label_smoothing=epsilon))\n losses.append(TripletLoss(margin=margin, track_distances=track_distances))\n losses.append(CenterLoss(num_classes=num_classes, dim=dim, device=device))\n super().__init__(*losses, weights=(1, 1, beta), items_len_key=items_len_key)\n","sub_path":"model/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":9291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"558173150","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"license_scanner\",\n version=\"0.0.0\",\n author=\"Steve Klassen\",\n author_email=\"steve.klassen@sensonic.com\",\n description=\"Utility for scanning projects for third-party licenses\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://scm01.frauscher.intern/fts/rnd/docker/license-check.git\",\n packages=setuptools.find_packages(),\n install_requires=[\n 'fpdf', 'requests'\n ],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: Other/Proprietary License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.5',\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"316798526","text":"import sys\nimport multiprocessing as mp\n\n\ndef make_tree(d):\n\n if d > 0:\n d -= 1\n return (make_tree(d), make_tree(d))\n return (None, None)\n\n\ndef poll_tree(node):\n\n (l, r) = node\n if l is None:\n return 1\n else:\n return 1 + poll_tree(l) + poll_tree(r)\n\n\ndef make_poll(itde, make=make_tree, poll=poll_tree):\n\n i, d = itde\n return poll(make(d))\n\n\ndef get_argchunks(i, d, chunksize=5000):\n\n assert chunksize % 2 == 0\n chunk = []\n for k in range(1, i + 1):\n chunk.extend([(k, d)])\n if len(chunk) == chunksize:\n yield chunk\n chunk = []\n if len(chunk) > 0:\n yield chunk\n\n\ndef main(n, min_depth=4):\n\n max_depth = max(min_depth + 2, n)\n stretch_depth = max_depth + 1\n if mp.cpu_count() > 1:\n pool = mp.Pool()\n chunkmap = pool.map\n else:\n chunkmap = map\n\n print('stretch depth tree {0}\\t poll: {1}'.format(\n stretch_depth, make_poll((0, stretch_depth))))\n\n long_lived_tree = make_tree(max_depth)\n\n mmd = max_depth + min_depth\n for d in range(min_depth, stretch_depth, 2):\n i = 2 ** (mmd - d)\n cs = 0\n for argchunk in get_argchunks(i,d):\n cs += sum(chunkmap(make_poll, argchunk))\n print('{0}\\t trees depth {1}\\t poll: {2}'.format(i, d, cs))\n\n print('long lived tree depth {0}\\t poll: {1}'.format(\n max_depth, poll_tree(long_lived_tree)))\n\n\nif __name__ == '__main__':\n main(int(sys.argv[1]))\n","sub_path":"BinTree.py","file_name":"BinTree.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"314930835","text":"from django import template\nfrom wagtailnhsukfrontend.settings.models import HeaderSettings\n\nregister = template.Library()\n\n@register.inclusion_tag('wagtailnhsukfrontend/header.html', takes_context=True)\ndef header(context):\n page = context['page']\n site = page.get_site()\n header = HeaderSettings.for_site(site)\n\n return {\n 'service_name': header.service_name,\n 'service_href': header.service_link.relative_url(site) if header.service_link else '',\n 
'service_long_name': header.service_long_name,\n 'transactional': header.transactional,\n 'logo_href': header.logo_link.relative_url(site) if header.logo_link else '',\n 'logo_aria': header.logo_aria,\n 'show_search': header.show_search,\n 'primary_links' : [\n {\n 'label': link.label,\n 'url': link.page.relative_url(site)\n }\n for link in header.navigation_links.all()\n ],\n }\n","sub_path":"wagtailnhsukfrontend/settings/templatetags/nhsukfrontendsettings_tags.py","file_name":"nhsukfrontendsettings_tags.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"635036234","text":"import pandas as pd \nfrom pytube import YouTube\nimport os \nSOURCE = pd.read_csv(\"extraible.csv\")\ndef bajar(link):\n base = \"/home/jaime/cosas/codigo/trabajo-minciencias/\"\n video = YouTube(link)\n try:\n descargable = video.streams.filter(type=\"audio\",file_extension=\"webm\")[0]\n descargable.download(\"./videos\")\n nombre = descargable.title\n \n except:\n print(f\"fallo {link}\")\n nombre = None\n return nombre\n\n#print(bajar(SOURCE.link))\n#print(bajar(\"https://www.youtube.com/watch?v=B3PaNSkkEL0\"))\n","sub_path":"audiovisual/bajar.py","file_name":"bajar.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"610164134","text":"# Author: Yizhak Ben-Shabat (Itzik), 2020\n# go over all of the dataset images and rearange them to comply with pytorch ImageFolder structure for the data loader\n\nimport os\nimport numpy as np\nimport sys\nsys.path.append(os.path.abspath('../action/'))\nfrom IKEAActionDataset import IKEAActionDataset\nimport shutil\nfrom tqdm import tqdm\nfrom multiprocessing import Pool\nimport itertools\nimport argparse\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset_path', type=str, default='/home/sitzikbs/Datasets/ANU_ikea_dataset_smaller',\n help='path to raw image IKEA ASM dataset dir after images were resized')\nparser.add_argument('--output_path', type=str, default='/home/sitzikbs/Datasets/ANU_ikea_dataset_smaller_ImageFolder',\n help='ImageFolder structure of the dataset')\nargs = parser.parse_args()\n\ndataset_path = args.dataset_path\noutput_path = args.output_path\n\ndef par_copy(arguments):\n frame_num, input_path, video_path, output_class_path = arguments\n frame_input_path = os.path.join(input_path, video_path, str(frame_num).zfill(6) + \".jpg\")\n frame_output_path = os.path.join(output_class_path,\n video_path.replace(\"/\", \"_\") + \"_\" + str(frame_num).zfill(6) + \".jpg\")\n shutil.copyfile(frame_input_path, frame_output_path)\n\n\ndef save_cls_images(frame_list, label_list, video_path, phase, num_classes, dataset_path, output_path, action_dict, p):\n # copy images from original dataset to imagefolder structure\n\n for cls in range(0, num_classes):\n frames = frame_list[np.where(label_list == cls)]\n cls_str = action_dict[cls]\n if not len(frames) == 0:\n output_class_path = os.path.join(output_path, phase, cls_str)\n os.makedirs(output_class_path, exist_ok=True)\n\n p.map(par_copy, zip(frames, itertools.repeat(dataset_path), itertools.repeat(video_path),\n itertools.repeat(output_class_path)))\n\nos.makedirs(output_path, exist_ok=True)\ntrain_path = os.path.join(output_path, 'train')\nos.makedirs(train_path, exist_ok=True)\ntest_path = os.path.join(output_path, 'test')\nos.makedirs(test_path, exist_ok=True)\ndb_file = 'ikea_annotation_db_full'\ntrain_file = 
'train_cross_env.txt'\ntest_file = 'test_cross_env.txt'\naction_list_file = 'atomic_action_list.txt'\naction_object_relation_file = 'action_object_relation_list.txt'\ndataset = IKEAActionDataset(dataset_path, db_file, action_list_file, action_object_relation_file, train_file, test_file)\n\n\ndataset_name = 'ANU_ikea_dataset'\n\ntrainset_videos = dataset.trainset_video_list\ntestset_videos = dataset.testset_video_list\naction_dict = dataset.action_list\n\nn_name_chars = len(dataset_name) + 1\n\nnum_classes = dataset.num_classes\ncursor_vid = dataset.get_annotated_videos_table(device='dev3')\nrows = cursor_vid.fetchall()\nwith Pool(8) as p:\n with tqdm(total=len(rows), file=sys.stdout) as pbar:\n for row in rows:\n pbar.update(1)\n n_frames = int(row[\"nframes\"])\n frame_list = np.arange(0, n_frames)\n label_list = np.zeros_like(frame_list)\n video_idx = row['id']\n video_path = row['video_path']\n video_name = os.path.join(video_path.split('/')[0], video_path.split('/')[1])\n if video_name in trainset_videos or video_name in testset_videos:\n cursor_annotations = dataset.get_video_annotations_table(video_idx)\n\n for ann_row in cursor_annotations:\n action_id = dataset.get_action_id(ann_row[\"atomic_action_id\"], ann_row[\"object_id\"]) # no need to +1 because table index starts at 1\n if action_id is not None:\n label_list[ann_row['starting_frame']:ann_row['ending_frame']] = action_id\n\n\n if video_name in trainset_videos:\n # trainset\n save_cls_images(frame_list, label_list, video_path, 'train', num_classes,\n dataset_path, output_path, action_dict, p)\n\n elif video_name in testset_videos:\n # testset\n save_cls_images(frame_list, label_list, video_path, 'test', num_classes,\n dataset_path, output_path, action_dict, p)\n\n\n\n","sub_path":"toolbox/rearange_dataset_images_to_ImageFolder.py","file_name":"rearange_dataset_images_to_ImageFolder.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"397486556","text":"import operator\nimport string\n\n\nclass Solution(object):\n OPERATORS = {\n '+': operator.add,\n '-': operator.sub,\n '*': operator.mul,\n }\n\n def diffWaysToCompute(self, input):\n tokens = []\n i = 0\n while i < len(input):\n if input[i] in '+-*':\n tokens.append(input[i])\n i += 1\n else:\n s_i = i\n while input[i] in string.digits:\n i += 1\n if i == len(input):\n break\n tokens.append(int(input[s_i:i]))\n\n if len(tokens) == 1:\n return tokens\n\n if tokens[0] == '-':\n tokens = [0] + tokens\n result = self.variations(tokens, 0, len(tokens)-1, {})\n return sorted(result)\n\n def variations(self, tokens, s, e, cache):\n if s == e:\n return [tokens[s]]\n if (s, e,) in cache:\n return cache[(s, e,)]\n\n result = []\n for i in range(s + 1, e + 1, 2):\n f = self.OPERATORS[tokens[i]]\n l_nums = self.variations(tokens, s, i - 1, cache)\n r_nums = self.variations(tokens, i + 1, e, cache)\n for l in l_nums:\n for r in r_nums:\n result.append(f(l, r))\n\n cache[(s, e,)] = result\n return result\n\n\ndef test():\n s = Solution()\n assert s.diffWaysToCompute('10+5') == [15]\n assert s.diffWaysToCompute('2-1-1') == [0, 2]\n assert s.diffWaysToCompute('2*3-4*5') == [-34, -14, -10, -10, 10]\n assert s.diffWaysToCompute('11') == [11]\n\n\nif __name__ == '__main__':\n test()\n","sub_path":"leetcode/241.py","file_name":"241.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} 
+{"seq_id":"44695460","text":"import os\nimport argparse\nimport logging\nimport settings\nimport data_manager\nfrom policy_learner_AC import PolicyLearner\n\n\ndef chooseModelver(stock_code):\n try:\n print(\"\\n\",os.listdir('models/{}'.format(stock_code)),'\\n')\n idx = int(input(\"Select model number using index : \"))\n return os.listdir('models/{}'.format(stock_code))[idx]\n except:\n raise settings.UndefinedModel()\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--code\",type=str,default='015760')\n parser.add_argument(\"--tax\",type=str,default='n')\n parser.add_argument(\"--bal\",type=int,default=1000000)\n parser.add_argument(\"--reward\",type=float,default=.02)\n\n FLAGs, _ = parser.parse_known_args()\n \n stock_code = FLAGs.code\n tax=FLAGs.tax\n bal=FLAGs.bal\n reward=FLAGs.reward\n\n if tax=='y':\n tax=True\n else:\n tax=False\n\n\n model_ver = chooseModelver(stock_code)[6:-3]\n\n # Log record\n log_dir = os.path.join(settings.BASE_DIR, 'logs/%s' % stock_code)\n timestr = settings.get_time_str()\n file_handler = logging.FileHandler(filename=os.path.join(\n log_dir, \"%s_%s.log\" % (stock_code, timestr)), encoding='utf-8')\n stream_handler = logging.StreamHandler()\n file_handler.setLevel(logging.DEBUG)\n stream_handler.setLevel(logging.INFO)\n logging.basicConfig(format=\"%(message)s\",\n handlers=[file_handler, stream_handler], level=logging.DEBUG)\n\n # Prepare stock data\n chart_data = data_manager.load_chart_data(\n os.path.join(settings.BASE_DIR,\n 'data/chart_data/{}.csv'.format(stock_code)))\n prep_data = data_manager.preprocess(chart_data)\n training_data = data_manager.build_training_data(prep_data)\n\n # Date range filtering\n training_data = training_data[(training_data['date'] >= '2019-01-01') &\n (training_data['date'] <= '2019-12-31')]\n training_data = training_data.dropna()\n\n # Chart Data Separation\n features_chart_data = ['date', 'open', 'high', 'low', 'close', 'volume']\n chart_data = training_data[features_chart_data]\n\n # Training data separation\n features_training_data = [\n 'open_lastclose_ratio', 'high_close_ratio', 'low_close_ratio',\n 'close_lastclose_ratio', 'volume_lastvolume_ratio',\n 'close_ma5_ratio', 'volume_ma5_ratio',\n 'close_ma10_ratio', 'volume_ma10_ratio',\n 'close_ma20_ratio', 'volume_ma20_ratio'\n ]\n training_data = training_data[features_training_data]\n\n # Start non-training investment simulation\n policy_learner = PolicyLearner(\n stock_code=stock_code, chart_data=chart_data, training_data=training_data,\n min_trading_unit=1, max_trading_unit=3,delayed_reward_threshold=reward,tax=tax)\n policy_learner.trade(balance=bal,\n model_path=os.path.join(\n settings.BASE_DIR,\n 'models/{}/model_{}.h5'.format(stock_code, model_ver)))\n","sub_path":"AC/main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"116284349","text":"# Copyright (c) 2015 by Farsight Security, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language 
governing permissions and\n# limitations under the License.\n\nimport base64\nimport hashlib\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nclass DigestError(Exception): pass\n\ndef check_digest(iterator, algorithm, digest):\n    logger.debug('algorithm={}, checksum={}'.format(algorithm, digest))\n\n    if algorithm is None:\n        logger.debug('No algorithm provided. Skipping digest check.')\n        for chunk in iterator:\n            yield chunk\n        return\n    elif algorithm.lower() in ('sha-224', 'sha224'):\n        digest_obj = hashlib.sha224()\n    elif algorithm.lower() in ('sha-256', 'sha256'):\n        digest_obj = hashlib.sha256()\n    elif algorithm.lower() in ('sha-384', 'sha384'):\n        digest_obj = hashlib.sha384()\n    elif algorithm.lower() in ('sha-512', 'sha512'):\n        digest_obj = hashlib.sha512()\n    else:\n        logger.debug('Unsupported algorithm: {}'.format(algorithm))\n        for chunk in iterator:\n            yield chunk\n        return\n\n    for chunk in iterator:\n        digest_obj.update(chunk)\n        yield chunk\n\n    real_digest = base64.b64encode(digest_obj.digest())\n    if real_digest != digest:\n        raise DigestError('Digest mismatch: {} != {}'.format(real_digest, digest))\n\ndef digest_extension(algorithm):\n    if algorithm.lower() in ('sha-224', 'sha224'):\n        return 'sha224'\n    elif algorithm.lower() in ('sha-256', 'sha256'):\n        return 'sha256'\n    elif algorithm.lower() in ('sha-384', 'sha384'):\n        return 'sha384'\n    elif algorithm.lower() in ('sha-512', 'sha512'):\n        return 'sha512'\n    else:\n        raise DigestError('Unknown algorithm: {}'.format(algorithm))\n\nDIGEST_EXTENSIONS = ('sha224', 'sha256', 'sha384', 'sha512')\n","sub_path":"dnstable_manager/digest.py","file_name":"digest.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"502599648","text":"#This program reads the syntax of our created language, and converts it to HTML code\nfrom Element import *\n\ntitle = '';\nelements = [];\n\ndef initialize(webpageTitle):\n    title = webpageTitle\n    global f, styles;\n    f= open(title + '.html', 'w')\n    styles = open(title + '.css', 'w')\n    \n    f.write('<html>\\n')\n    f.write('<head>\\n')\n    f.write('\\t<title>' + title + '</title>\\n')\n    f.write('\\t<link rel=\"stylesheet\" type=\"text/css\" href=\"' + title + '.css\">\\n')\n    f.write('</head>\\n')\n\n    f.write('<body>\\n\\n')\n    return;\n\ndef searchElement(identifier):\n    for e in elements:\n        if e.getID() == identifier:\n            return e\n\n    return None\n    \ndef writeHeader(identifier, number, text):\n    if number < 1 or number > 6:\n        raise ValueError('Invalid header number: ' + str(number) + '\\n')\n\n    f.write('\\t<h' + str(number) + ' id=\"' + identifier + '\">' + text + '</h' + str(number) + '>\\n')\n    elements.append(Element(identifier, 'heading' + str(number), text))\n    return\n\ndef writeParagraph(identifier, text):\n    f.write('\\t<p id=\"' + identifier + '\">' + text + '</p>\\n')\n    elements.append(Element(identifier, 'paragraph', text))\n    return \n\ndef writeHyperlink(identifier, link, text):\n    if identifier is None:\n        f.write('\\t<a href=\"' + link + '\">' + text + '</a>\\n')\n    else:\n        f.write('\\t<a id=\"' + identifier + '\" href=\"' + link + '\">' + text + '</a>\\n')\n    elements.append(Element(identifier, 'hyperlink', text))\n    return\n\ndef writeImage(identifier, source, link):\n    if link is None:\n        f.write('\\t<img id=\"' + identifier + '\" src=\"' + source + '\">\\n')\n    else:\n        writeHyperlink(identifier, link, '<img src=\"' + source + '\">')\n    elements.append(Element(identifier, 'image', None))\n    return\n\ndef setColor(identifier, color):\n    elm = searchElement(identifier)\n    elm.setColor(color)\n    return\n\ndef setAlignment(identifier, alignment):\n    elm = searchElement(identifier)\n    elm.setAlignment(alignment)\n    return\n\ndef setFont(identifier, font):\n    elm = searchElement(identifier)\n    elm.setFont(font)\n    return\n\ndef setBold(identifier, isBold):\n    elm = searchElement(identifier)\n    elm.setBold(isBold)\n    return\n\ndef setItalic(identifier, isItalic):\n    elm = searchElement(identifier)\n    elm.setItalic(isItalic)\n    return\n\ndef setUnderline(identifier, isUnderline):\n    elm = searchElement(identifier)\n    elm.setUnderline(isUnderline)\n    return\n\ndef finalize():\n    f.write('</body>\\n')\n    f.write('</html>')\n\n    f.close();\n    \n    for e in elements:\n        styles.write('#' + e.getID() + '{\\n')\n        styles.write('\\tcolor: ' + e.getColor() + ';\\n')\n        styles.write('\\ttext-align: ' + e.getAlignment() + ';\\n')\n        styles.write('\\tfont: ' + e.getFont() + ';\\n') #TODO: Fix\n        if e.getBold() is True:\n            styles.write('\\tfont-weight: bold;\\n')\n        if e.getItalic() is True:\n            styles.write('\\tfont-style: italic;\\n')\n        if e.getUnderline() is True:\n            styles.write('\\ttext-decoration: underline;\\n')\n        styles.write('}\\n\\n')\n    \n    styles.close()\n    return\n\n\n\n\n","sub_path":"Interpreter.py","file_name":"Interpreter.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"47353498","text":"#!/usr/local/bin/python3.3\n\nimport http.client\nimport http.cookiejar\nimport urllib.request, urllib.parse, urllib.error\nimport re\nimport os, sys\nimport time\nimport subprocess\nimport multiprocessing\nimport glob\n\n\n\ndef login():\n    values = {\n        'user_login': 'userid',\n        'user_password': 'password'\n    }\n    params = urllib.parse.urlencode(values)\n    params = params.encode('utf-8')\n    req = urllib.request.Request('http://www.mersenne.org/default.php', params)\n    while True:\n        try:\n            response = urllib.request.urlopen(req, timeout=60)\n            if (response.reason == 'OK'):\n                print('login')\n                time.sleep(1)\n                break\n        except (urllib.error.URLError, urllib.error.HTTPError) as e:\n            print('login errors:', e)\n        except:\n            print('Unexpected errors in login:', sys.exc_info()[0])\n        time.sleep(30)\n    \n    return\n\n\ndef logout():\n    values = {\n        'logout' : 'u'\n    }\n    params = urllib.parse.urlencode(values)\n    params = params.encode('utf-8')\n    req = urllib.request.Request('http://www.mersenne.org/manual_assignment/default.php?logout=u')\n    while True:\n        try:\n            response = urllib.request.urlopen(req, timeout=60)\n            if (response.reason == 'OK'):\n                print('logout')\n                time.sleep(1)\n                break\n        except (urllib.error.URLError, urllib.error.HTTPError) as e:\n            print('logout errors:', e)\n        except:\n            print('Unexpected errors in logout:', sys.exc_info()[0])\n        time.sleep(30)\n    \n    return\n\n\ndef get_assignments():\n    login()\n    \n    values = {\n        'cores' : '1',\n        'num_to_get' : '6',\n        'pref' : '2',\n        'exp_lo' : '',\n        'exp_hi' : '',\n        'B1' : 'Get Assignments'\n    }\n    params = urllib.parse.urlencode(values)\n    params = params.encode('utf-8')\n    req = 
urllib.request.Request('http://www.mersenne.org/manual_assignment/default.php', params)\n    while True:\n        try:\n            response = urllib.request.urlopen(req, timeout=60)\n            if (response.reason == 'OK'):\n                page = response.read().decode('utf-8')\n                #Factor=9A87D89A3977A81FE465AE0811AEEAF7,90481907,69,70\n                factor = re.findall(r\"Factor=\\w+,\\d{8,15},\\d{2},\\d{2}\", page)\n                if (len(factor)):\n                    with open('worktodo.txt', 'a', encoding='utf-8') as file:\n                        file.write('\\n'.join(factor))\n                print('get_assignments')\n                time.sleep(1)\n                break\n        except (urllib.error.URLError, urllib.error.HTTPError) as e:\n            print('Get Assignments errors:', e)\n        except:\n            print('Unexpected errors in Get Assignments:', sys.exc_info()[0])\n        time.sleep(30)\n\n    logout()\n    \n    return\n\n\ndef submit_results():\n    with open('results.txt', 'r', encoding='utf-8') as file:\n        results = file.read()\n\n    login()\n    \n    values = {\n        'B1' : 'Submit',\n        'data' : results\n    }\n    params = urllib.parse.urlencode(values)\n    params = params.encode('utf-8')\n    req = urllib.request.Request(\"http://www.mersenne.org/manual_result/default.php\", params)\n    while True:\n        try:\n            response = urllib.request.urlopen(req, timeout=60)\n            if (response.reason == 'OK'):\n                page = response.read().decode('utf-8')\n                page_re = re.findall(r\"M\\d{8,15}\", page)\n                results_re = re.findall(r\"M\\d{8,15}\", results)\n                #check that the returned html contains the submitted results.txt entries, then remove the file\n                if ((set(page_re) & set(results_re)) == set(results_re)):\n                    with open('results.log', 'a', encoding='utf-8') as file:\n                        file.write(results)\n                    os.remove('results.txt')\n                print('submit_results')\n                time.sleep(1)\n                break\n        except (urllib.error.URLError, urllib.error.HTTPError) as e:\n            print('send results errors:', e)\n        except:\n            print('Unexpected errors in send results:', sys.exc_info()[0])\n        time.sleep(30)\n\n    logout()\n    \n    return\n\n\ndef run_mfaktc(device_num):\n    os.chdir('./mfaktc-0.20_'+str(device_num))\n\n    if len(glob.glob('*.ckp')):\n        subprocess.call(['./mfaktc.exe', '-d', str(device_num)])\n\n    cj = http.cookiejar.CookieJar()\n    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))\n    opener.addheaders = [('User-agent', 'Mozilla/5.0')]\n    urllib.request.install_opener(opener)\n    \n    while True:\n        if (os.path.isfile('exit')):\n            break\n        \n        #Get Assignments 1 cores, 6 assignments, Trial Factoring works\n        get_assignments()\n        \n        #run cuda\n        subprocess.call(['./mfaktc.exe', '-d', str(device_num)])\n\n        #submit results\n        submit_results()\n\n        time.sleep(1)\n\n    return\n    \n\nif __name__ == '__main__':\n    total_device = 1\n    if (len(sys.argv) > 1):\n        total_device = int(sys.argv[1])\n\n    workers = []\n    for device in range(total_device):\n        p = multiprocessing.Process(target=run_mfaktc, args=(device,))\n        workers.append(p)\n        p.start()\n        time.sleep(10)\n\n    for p in workers:\n        p.join()\n","sub_path":"mfaktc.py","file_name":"mfaktc.py","file_ext":"py","file_size_in_byte":5371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"45563849","text":"#!/usr/bin/env python\r\n#coding:utf-8\r\n\r\nimport pyaudio\r\nimport wave\r\nimport cv2\r\nimport numpy as np\r\nfrom datetime import datetime\r\n\r\n# Record 2 seconds of audio whenever the volume exceeds a threshold, and save it as a wav file\r\n\r\nCHUNK = 1024\r\nFORMAT = pyaudio.paInt16 # 16-bit int\r\nCHANNELS = 1 # mono\r\nRATE = 44100 # 44.1kHz\r\nRECORD_SECONDS = 2 # record for 2 seconds\r\nTHRESHOLD = 0.01 # threshold (tune this value)\r\n\r\n# create a PyAudio instance\r\np = pyaudio.PyAudio()\r\n\r\n# open the input stream\r\nstream = p.open(format = FORMAT, channels = CHANNELS, rate = RATE, input = True, frames_per_buffer = CHUNK)\r\n\r\nwhile True:\r\n\r\n    # read a chunk and compute the normalized level\r\n    data = stream.read(CHUNK)\r\n    x = np.frombuffer(data, dtype=\"int16\") / 32768.0\r\n\r\n    # if the level exceeds the threshold, record RECORD_SECONDS seconds of audio\r\n    if x.max() > THRESHOLD:\r\n\r\n        # build the filename from the current timestamp\r\n        filename = datetime.today().strftime(\"%Y%m%d%H%M%S\") + \".wav\"\r\n        print(filename)\r\n\r\n        # record for RECORD_SECONDS seconds\r\n        frames = []\r\n        for i in range(0, int(RATE / CHUNK * int(RECORD_SECONDS))):\r\n            data = stream.read(CHUNK)\r\n            frames.append(data)\r\n\r\n        # write the wav file\r\n        out = wave.open(filename,'w') # open the file\r\n        out.setnchannels(CHANNELS) # mono\r\n        out.setsampwidth(p.get_sample_size(FORMAT)) # sample width\r\n        out.setframerate(RATE) # sample rate\r\n        out.writeframes(b''.join(frames)) # write the data to the file\r\n        out.close() # close the file\r\n\r\n        print(\"Saved.\")\r\n\r\n    # wait for a key press; the argument is the wait time (ms); 0 waits forever\r\n    inkey = cv2.waitKey(1)\r\n    if inkey == 0x1b:\r\n        # exit on ESC\r\n        break\r\n\r\n# stop and close the stream\r\nstream.stop_stream()\r\nstream.close()\r\np.terminate()\r\n","sub_path":"sonota_sample/sample_pyaudio2.py","file_name":"sample_pyaudio2.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"371886010","text":"input = \"010101\"\n\n\ndef find_count_to_turn_out_to_all_zero_or_all_one(string):\n    # count_to_all_zero = 0\n    # count_to_all_one = 0\n    # if string[0] == '0': count_to_all_one += 1\n    # else : count_to_all_zero += 1\n    #\n    # for i in range(len(string) - 1):\n    #     if string[i] != string[i+1]:\n    #         if string[i+1] == '0': count_to_all_one += 1\n    #         else : count_to_all_zero += 1\n    # return min(count_to_all_zero, count_to_all_one)\n\n    changeZero = 0\n    changeOne = 0\n\n    for i in range(len(string) - 1):\n        if string[i] != string[i+1]:\n            if string[i+1] == '0' : changeOne += 1\n            else : changeZero += 1\n\n    if string[0] == '0' : changeOne += 1\n    else : changeZero += 1\n\n    return min(changeZero, changeOne)\n\nresult = find_count_to_turn_out_to_all_zero_or_all_one(input)\nprint(result)","sub_path":"week_1/homework/02_find_count_to_turn_out_to_all_zero_or_all_one.py","file_name":"02_find_count_to_turn_out_to_all_zero_or_all_one.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"273674368","text":"# -*- coding: utf-8 -*-\r\n\r\n# Import modules\r\nimport scipy.stats\r\nimport numpy as np\r\n\r\nsizeIntsList=xrange(10,1000,100)\r\nsizeInt=len(sizeIntsList)\r\nmeanTheta=np.zeros(sizeInt,dtype=float)\r\nsdTheta=np.zeros(sizeInt,dtype=float)\r\nmeanDiagOmega=np.zeros(sizeInt,dtype=float)\r\nmeanLatOmega=np.zeros(sizeInt,dtype=float)\r\nsdMeanOmega=np.zeros(sizeInt,dtype=float)\r\nsdLatOmega=np.zeros(sizeInt,dtype=float)\r\n\r\n#loop\r\nfor indexInt,N in enumerate(sizeIntsList,):\r\n    Gamma=scipy.stats.norm(\r\n        #0.5/np.sqrt(N),\r\n        #3./np.sqrt(N)\r\n        0.5,\r\n        3\r\n    ).rvs((N,N))\r\n    Omega=np.dot(Gamma.T,Gamma)\r\n    meanTheta[indexInt]=np.mean((np.sum(Gamma*Gamma/2., axis=0)))\r\n    meanDiagOmega[indexInt]=np.mean(np.diag(Omega),axis=0)\r\n    meanLatOmega[indexInt]=np.sum(np.sum(Omega))/(float((N*(N-1))))\r\n\r\n#plot\r\nfrom matplotlib import pyplot\r\npyplot.plot(sizeIntsList,meanTheta,'.-',color='blue',label=\"$\\Theta$\")\r\npyplot.plot(sizeIntsList,meanDiagOmega,'.-',color='red',label=\"$Diag\\ \\Omega$\")\r\npyplot.plot(sizeIntsList,meanLatOmega,'.-',color='orange',label=\"$Lat\\ 
\\Omega$\")\r\npyplot.legend()\r\npyplot.show()\r\n\r\n\r\n\r\n","sub_path":"tests_spike/00_test_normalisation.py","file_name":"00_test_normalisation.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"272032132","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/chaoyu/workspace/BentoML/bentoml/configuration/__init__.py\n# Compiled at: 2019-12-18 20:21:36\n# Size of source mod 2**32: 6350 bytes\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os, logging\nfrom pathlib import Path\nfrom bentoml import __version__\nfrom bentoml.utils import _is_pypi_release\nfrom bentoml.exceptions import BentoMLConfigException\nfrom bentoml.configuration.configparser import BentoMLConfigParser\nlogger = logging.getLogger(__name__)\nDEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__), 'default_bentoml.cfg')\n\ndef expand_env_var(env_var):\n \"\"\"Expands potentially nested env var by repeatedly applying `expandvars` and\n `expanduser` until interpolation stops having any effect.\n \"\"\"\n if not env_var:\n return env_var\n while True:\n interpolated = os.path.expanduser(os.path.expandvars(str(env_var)))\n if interpolated == env_var:\n return interpolated\n env_var = interpolated\n\n\ndef parameterized_config(template):\n \"\"\"Generates a configuration from the provided template + variables defined in\n current scope\n\n Args:\n :param template: a config content templated with {{variables}}\n Returns:\n string: config content after templated with locals() and globals()\n \"\"\"\n all_vars = {k:v for d in [globals(), locals()] for k, v in d.items()}\n return (template.format)(**all_vars)\n\n\nDEFAULT_BENTOML_HOME = expand_env_var(os.environ.get('BENTOML_HOME', '~/bentoml'))\nBENTOML_HOME = DEFAULT_BENTOML_HOME\nBENTOML_VERSION = __version__\nPREV_PYPI_RELEASE_VERSION = __version__.split('+')[0]\nif not _is_pypi_release():\n BENTOML_VERSION = PREV_PYPI_RELEASE_VERSION\n\ndef get_local_config_file():\n global BENTOML_HOME\n if 'BENTOML_CONFIG' in os.environ:\n return expand_env_var(os.environ.get('BENTOML_CONFIG'))\n return os.path.join(BENTOML_HOME, 'bentoml.cfg')\n\n\ndef load_config():\n try:\n Path(BENTOML_HOME).mkdir(exist_ok=True)\n except OSError as err:\n try:\n raise BentoMLConfigException(\"Error creating bentoml home directory '{}': {}\".format(BENTOML_HOME, err.strerror))\n finally:\n err = None\n del err\n\n with open(DEFAULT_CONFIG_FILE, 'rb') as (f):\n DEFAULT_CONFIG = f.read().decode('utf-8')\n loaded_config = BentoMLConfigParser(default_config=(parameterized_config(DEFAULT_CONFIG)))\n local_config_file = get_local_config_file()\n if os.path.isfile(local_config_file):\n logger.info('Loading local BentoML config file: %s', local_config_file)\n with open(local_config_file, 'rb') as (f):\n loaded_config.read_string(parameterized_config(f.read().decode('utf-8')))\n else:\n logger.info('No local BentoML config file found, using default configurations')\n return loaded_config\n\n\n_config = None\n\ndef _reset_bentoml_home(new_bentoml_home_directory):\n global BENTOML_HOME\n global DEFAULT_BENTOML_HOME\n global _config\n DEFAULT_BENTOML_HOME = new_bentoml_home_directory\n BENTOML_HOME = new_bentoml_home_directory\n _config = load_config()\n from bentoml.utils.log import configure_logging\n root = 
logging.getLogger()\n map(root.removeHandler, root.handlers[:])\n map(root.removeFilter, root.filters[:])\n configure_logging()\n\n\ndef _get_bentoml_home():\n return BENTOML_HOME\n\n\ndef config(section=None):\n global _config\n if _config is None:\n _config = load_config()\n if section is not None:\n return _config[section]\n return _config\n\n\ndef get_bentoml_deploy_version():\n \"\"\"\n BentoML version to use for generated docker image or serverless function bundle to\n be deployed, this can be changed to an url to your fork of BentoML on github, or an\n url to your custom BentoML build, for example:\n\n bentoml_deploy_version = git+https://github.com/{username}/bentoml.git@{branch}\n \"\"\"\n bentoml_deploy_version = config('core').get('bentoml_deploy_version')\n if bentoml_deploy_version != __version__:\n logger.warning(\"BentoML local changes detected - Local BentoML repository including all code changes will be bundled together with the BentoService bundle. When used with docker, the base docker image will be default to same version as last PyPI release at version: %s. You can also force bentoml to use a specific version for deploying your BentoService bundle, by setting the config 'core/bentoml_deploy_version' to a pinned version or your custom BentoML on github, e.g.:'bentoml_deploy_version = git+https://github.com/{username}/bentoml.git@{branch}'\", PREV_PYPI_RELEASE_VERSION)\n return bentoml_deploy_version","sub_path":"pycfiles/BentoML-0.7.3-py3-none-any/__init__.cpython-37.py","file_name":"__init__.cpython-37.py","file_ext":"py","file_size_in_byte":4849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"219664723","text":"# -*- coding: utf-8 -*-\nr\"\"\"\nLightning Trainer Setup\n=======================\n Setup logic for the lightning trainer.\n\"\"\"\nimport os\nfrom argparse import Namespace\nfrom datetime import datetime\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks import (\n EarlyStopping,\n LearningRateLogger,\n ModelCheckpoint,\n)\nfrom pytorch_lightning.loggers import TensorBoardLogger\n\nfrom utils import Config\n\n\nclass TrainerConfig(Config):\n \"\"\"\n The TrainerConfig class is used to define default hyper-parameters that\n are used to initialize our Lightning Trainer. These parameters are then overwritted\n with the values defined in the YAML file.\n\n -------------------- General Parameters -------------------------\n\n :param seed: Training seed.\n\n :param deterministic: If true enables cudnn.deterministic. Might make your system\n slower, but ensures reproducibility.\n\n :param verbose: verbosity mode.\n\n :param overfit_batches: Uses this much data of the training set. If nonzero, will use\n the same training set for validation and testing. If the training dataloaders\n have shuffle=True, Lightning will automatically disable it.\n\n -------------------- Model Checkpoint & Early Stopping -------------------------\n\n :param early_stopping: If true enables EarlyStopping.\n\n :param save_top_k: If save_top_k == k, the best k models according to the metric\n monitored will be saved.\n\n :param monitor: Metric to be monitored.\n\n :param save_weights_only: Saves only the weights of the model.\n\n :param metric_mode: One of {min, max}. 
In min mode, training will stop when the\n metric monitored has stopped decreasing; in max mode it will stop when the\n metric monitored has stopped increasing.\n\n :param min_delta: Minimum change in the monitored metric to qualify as an improvement.\n\n :param patience: Number of epochs with no improvement after which training will be stopped.\n\n :param accumulate_grad_batches: Gradient accumulation steps.\n \"\"\"\n\n seed: int = 3\n deterministic: bool = True\n verbose: bool = False\n overfit_batches: float = 0.0\n\n # Model Checkpoint & Early Stopping\n early_stopping: bool = True\n save_top_k: int = 1\n monitor: str = \"macro-f1\"\n save_weights_only: bool = False\n metric_mode: str = \"max\"\n min_delta: float = 0.0\n patience: int = 1\n accumulate_grad_batches: int = 1\n\n def __init__(self, initial_data: dict) -> None:\n trainer_attr = pl.Trainer.default_attributes()\n for key in trainer_attr:\n setattr(self, key, trainer_attr[key])\n\n for key in initial_data:\n if hasattr(self, key):\n setattr(self, key, initial_data[key])\n\n\ndef build_trainer(hparams: Namespace) -> pl.Trainer:\n \"\"\"\n :param hparams: Namespace\n\n Returns:\n - pytorch_lightning Trainer\n \"\"\"\n # Early Stopping Callback\n early_stop_callback = EarlyStopping(\n monitor=hparams.monitor,\n min_delta=hparams.min_delta,\n patience=hparams.patience,\n verbose=hparams.verbose,\n mode=hparams.metric_mode,\n )\n\n # TestTube Logger Callback\n tb_logger = TensorBoardLogger(\n save_dir=\"experiments/\",\n version=\"version_\" + datetime.now().strftime(\"%d-%m-%Y--%H-%M-%S\"),\n name=\"\",\n )\n\n # Model Checkpoint Callback\n ckpt_path = os.path.join(\n \"experiments/\",\n tb_logger.version,\n \"checkpoints\",\n )\n\n checkpoint_callback = ModelCheckpoint(\n filepath=ckpt_path,\n save_top_k=hparams.save_top_k,\n verbose=hparams.verbose,\n monitor=hparams.monitor,\n save_weights_only=hparams.save_weights_only,\n period=0, # Always allow saving checkpoint even within the same epoch\n mode=hparams.metric_mode,\n )\n\n trainer = pl.Trainer(\n logger=tb_logger,\n checkpoint_callback=checkpoint_callback,\n early_stop_callback=early_stop_callback,\n callbacks=[LearningRateLogger()],\n gradient_clip_val=hparams.gradient_clip_val,\n gpus=hparams.gpus,\n log_gpu_memory=\"all\",\n deterministic=hparams.deterministic,\n overfit_batches=hparams.overfit_batches,\n accumulate_grad_batches=hparams.accumulate_grad_batches,\n max_epochs=hparams.max_epochs,\n min_epochs=hparams.min_epochs,\n limit_train_batches=hparams.limit_train_batches,\n limit_val_batches=hparams.limit_val_batches,\n val_check_interval=hparams.val_check_interval,\n log_save_interval=hparams.log_save_interval,\n distributed_backend=\"dp\",\n precision=hparams.precision,\n weights_summary=\"top\",\n profiler=hparams.profiler,\n num_sanity_val_steps=5,\n )\n return trainer\n","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":4784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"320412808","text":"import json\nimport base64\n\nfrom flask import Blueprint\nfrom flask import render_template\nfrom flask import request\nfrom flask import jsonify\n\nfrom hw_diag.utilities.hardware import should_display_lte\n\n\nDIAGNOSTICS = Blueprint('DIAGNOSTICS', __name__)\n\n\ndef read_diagnostics_file():\n diagnostics = {}\n try:\n with open('diagnostic_data.json', 'r') as f:\n diagnostics = json.load(f)\n except FileNotFoundError:\n msg = 'Diagnostics have not yet run, please try again in a 
few minutes'\n diagnostics = {'error': msg}\n return diagnostics\n\n\n@DIAGNOSTICS.route('/')\ndef get_diagnostics():\n diagnostics = read_diagnostics_file()\n\n if request.args.get('json'):\n response = jsonify(diagnostics)\n response.headers.set('Content-Disposition',\n 'attachment;filename=nebra-diag.json'\n )\n return response\n\n display_lte = should_display_lte(diagnostics)\n\n return render_template(\n 'diagnostics_page.html',\n diagnostics=diagnostics,\n display_lte=display_lte\n )\n\n\n@DIAGNOSTICS.route('/initFile.txt')\ndef get_initialisation_file():\n diagnostics = read_diagnostics_file()\n\n if diagnostics.get('error'):\n return 'Internal Server Error', 500\n\n response = {\n \"VA\": diagnostics['VA'],\n \"FR\": diagnostics['FR'],\n \"E0\": diagnostics['E0'],\n \"RPI\": diagnostics['RPI'],\n \"OK\": diagnostics['OK'],\n \"PK\": diagnostics['PK'],\n \"PF\": diagnostics[\"PF\"],\n \"ID\": diagnostics[\"ID\"]\n }\n\n response_b64 = base64.b64encode(str(json.dumps(response)).encode('ascii'))\n return response_b64\n","sub_path":"hw_diag/views/diagnostics.py","file_name":"diagnostics.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"162137943","text":"from tkinter import Frame, Menu, Canvas, filedialog, messagebox\nfrom PIL import Image, ImageTk\nfrom DicomHandler import DicomHandler\nimport numpy as np\nfrom sys import platform as sys_pf\n\n# OSX build fix\nif sys_pf == 'darwin':\n import matplotlib\n matplotlib.use(\"TkAgg\")\n\nimport json\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom voxelMethods import getVoxelPosition\n\nPOINT_RADIUS_HALF = 3\n\n\nclass Window(Frame):\n\n def __init__(self, root, size=(500, 500)):\n super().__init__()\n self.root = root\n self.size = size\n self.root.geometry(str(self.size[0]) + \"x\" + str(self.size[1]) + \"+0+0\")\n self.dicomHandler = None\n self.canvas = None\n\n self.imageShown = False\n self.imagesToShow = []\n self.imagesSlideshow = False\n self.imagesMeta = []\n self.imageCount = 0\n\n self.points = []\n self.pointsForImages = []\n self.initUI()\n\n def initUI(self):\n self.master.title(\"Dicometer\")\n self.menubar = Menu(self.master)\n self.master.config(menu=self.menubar)\n fileMenu = Menu(self.menubar)\n self.menubar.add_cascade(label=\"File\", menu=fileMenu)\n fileMenu.add_command(label=\"Open single file\", command=self.onFileOpen)\n fileMenu.add_command(label=\"Open directory\", command=self.onDirOpen)\n fileMenu.add_command(label=\"Exit\", command=self.onExit)\n\n slideshowControlls = Menu(self.menubar)\n self.menubar.add_cascade(label=\"Controlls\", menu=slideshowControlls)\n slideshowControlls.add_command(label=\"Next\", command=self.onNextImage)\n slideshowControlls.add_command(label=\"Preview\", command=self.onPreview)\n slideshowControlls.add_command(label=\"View Metrics\", command=self.onMetrics)\n slideshowControlls.add_command(label=\"Export\", command=self.onExport)\n\n helpControlls = Menu(self.menubar)\n self.menubar.add_cascade(label=\"Help\", menu=helpControlls)\n helpControlls.add_command(label=\"Reference\", command=self.onReference)\n\n self.menubar.entryconfig(\"Controlls\", state=\"disabled\")\n self.initCanvas(self.size)\n self.canvas.create_text(\n self.size[0] >> 1,\n self.size[1] >> 1,\n text=\"Open file or directory by following File option\",\n fill=\"black\"\n )\n\n def initCanvas(self, size):\n if self.canvas:\n self.canvas.destroy()\n self.canvas = 
Canvas(self.root, width=size[0], height=size[1])\n        self.canvas.pack(expand=1)\n        self.canvas.bind(\"<Button-1>\", self.onMouseClicked)\n        self.canvas.bind(\"<Button-3>\", self.onMouseRightClicked)\n\n    def showImage(self, imageToShow):\n        width, height = np.array(imageToShow).shape\n        self.root.geometry(str(width) + \"x\" + str(height))\n        self.initCanvas((width, height))\n\n        img = ImageTk.PhotoImage(image=Image.fromarray(imageToShow))\n        self.root.img = img\n        self.canvas.create_image((0, 0), image=img, state=\"normal\", anchor=\"nw\")\n        self.imageShown = True\n\n    def redraw(self):\n        self.showImage(self.imagesToShow[self.imageCount])\n        self.drawDynamic()\n\n    def getPointsForImages(self, images):\n        print(\"Requesting points for images\")\n        self.imagesToShow = images\n        self.imagesSlideshow = True\n        self.showImage(images[0])\n        self.showSlideshowMenu()\n\n    def showSlideshowMenu(self):\n        self.menubar.entryconfig(\"Controlls\", state=\"normal\")\n\n    def drawDynamic(self):\n        for point in self.points:\n            self.canvas.create_oval(\n                point[0] - POINT_RADIUS_HALF,\n                point[1] - POINT_RADIUS_HALF,\n                point[0] + POINT_RADIUS_HALF,\n                point[1] + POINT_RADIUS_HALF,\n                fill=\"#ff0000\",\n                outline=\"#ff0000\"\n            )\n\n    def onExit(self):\n        self.quit()\n\n    def onFileOpen(self):\n        path = filedialog.askopenfilename()\n        if path is None:\n            return\n        self.dicomHandler = DicomHandler(self, path, False)\n        images, meta = self.dicomHandler.parseFile(self.dicomHandler.path)\n        self.imagesMeta = [meta]\n        self.getPointsForImages(images)\n\n    def onDirOpen(self):\n        path = filedialog.askdirectory()\n        if path is None:\n            return\n        self.dicomHandler = DicomHandler(self, path, True)\n        images, meta = self.dicomHandler.parseDirectory()\n        self.imagesMeta = meta\n        self.getPointsForImages(images)\n\n    def onMouseClicked(self, event):\n        if self.imageShown:\n            point = (event.x, event.y)\n            self.points.append(point)\n            self.redraw()\n\n    def onMouseRightClicked(self, event):\n        if self.imageShown and len(self.points) != 0:\n            point = (event.x, event.y)\n            self.points = [x for x in self.points if abs(x[0] - point[0]) > 3 or abs(x[1] - point[1]) > 3]\n            self.redraw()\n\n    def onNextImage(self):\n        self.pointsForImages.append(self.points)\n        self.imageCount += 1\n        self.points = []\n        if len(self.imagesToShow) > self.imageCount:\n            self.showImage(self.imagesToShow[self.imageCount])\n        else:\n            messagebox.showinfo(\"No more images were opened\", \"You have pointed all images, export or preview now\")\n\n    def getAllPoints(self):\n        return self.pointsForImages + [self.points]\n\n    def getXYZPoints(self):\n        X = []\n        Y = []\n        Z = []\n        for imageIndex, pointsForImage in enumerate(self.getAllPoints()):\n            for point in pointsForImage:\n                meta = self.imagesMeta[imageIndex]\n                x, y, z = getVoxelPosition(point, meta[\"pixelSpacing\"], meta[\"imagePosition\"], meta[\"imageOrientation\"])\n                X.append(x)\n                Y.append(y)\n                Z.append(z)\n        return X, Y, Z\n\n    def onPreview(self):\n        X, Y, Z = self.getXYZPoints()\n        fig = plt.figure()\n        ax = fig.add_subplot(111, projection='3d')\n        ax.scatter(X, Y, Z)\n        ax.set_xlabel('X')\n        ax.set_ylabel('Y')\n        ax.set_zlabel('Z')\n        plt.show()\n\n    def getMetrics(self):\n        X, Y, Z = self.getXYZPoints()\n        xMax, xMin = np.amax(X), np.amin(X)\n        yMax, yMin = np.amax(Y), np.amin(Y)\n        zMax, zMin = np.amax(Z), np.amin(Z)\n        return abs(xMax - xMin), abs(yMax - yMin), abs(zMax - zMin)\n\n    def onMetrics(self):\n        x, y, z = self.getMetrics()\n        messagebox.showinfo(\"Metrics\", \"X metric is \" + str(x) + \"mm \\n\"\n                                      \"Y metric is \" + str(y) + \"mm \\n\"\n                                      \"Z metric is \" + str(z) + \"mm \\n\"\n                            )\n\n    def 
onExport(self):\n includeMetrics = messagebox.askyesno(\"Export\", \"Would tou like to include metrics?\")\n path = filedialog.asksaveasfilename(defaultextension=\".json\")\n if path is None:\n return\n\n x, y, z = self.getXYZPoints()\n data = {\n \"points\": {}\n }\n data[\"points\"][\"x\"] = x\n data[\"points\"][\"y\"] = y\n data[\"points\"][\"z\"] = z\n\n if includeMetrics:\n x, y, z = self.getMetrics()\n data[\"metrics\"] = {}\n data[\"metrics\"][\"x\"] = x\n data[\"metrics\"][\"y\"] = y\n data[\"metrics\"][\"z\"] = z\n data[\"metrics\"][\"units\"] = \"mm\"\n\n with open(path, 'w') as outfile:\n json.dump(data, outfile)\n\n messagebox.showinfo(\"Exported\", \"Operation is successful you can obtain your file at '\" + path + \"'\")\n\n\n def onReference(self):\n messagebox.showinfo(\"Reference\", \"Menu: \\n\"\n \"Controlls -> Next: next picture in sequence \\n\"\n \"Controlls -> Export: Exports points data in .json format \\n\"\n \"Image proccessing: \\n\"\n \"To create point: left mouse click on canvas\\n\"\n \"To remove point: right mouse click on canvas near the corresponding point\\n\"\n )\n","sub_path":"Window.py","file_name":"Window.py","file_ext":"py","file_size_in_byte":8157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"554024557","text":"import unittest\nimport urllib, urllib2, os\t\t\n\nclass TestREST(unittest.TestCase):\t\t\n\tdef test_home(self):\n\t\treq = urllib2.Request('http://localhost/rmehta/wnframework-client/')\n\t\treq.get_method = lambda: 'GET'\n\t\tresponse = urllib2.urlopen(req)\n\t\tself.assertTrue(response.getcode()==200)\n\nif __name__=='__main__':\n\tunittest.main()","sub_path":"py/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"205905717","text":"from PyQt5 import QtCore, QtSerialPort, QtWidgets\r\nimport numpy as np\r\nimport datetime\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nintervalNum = 100 # Number of intervals to be recorded\r\nreplicaNum = 2 # Number of replicas to record\r\nverbose = 0 # set verbose True or False, or 1 or 0 if want to see more info\r\nsave_data = False # set True or False or 1 or 0 to save data\r\ncounter_comm = 0 # counter to start data taking\r\ncounter = [] # counter list of data taken\r\ninterval_nums = [] # interval numbers\r\nintervals = np.ones((replicaNum,intervalNum)) # Declare intervals array, and fill it with ones, shape of replica number and interval number\r\nreplica = 0 # replicas started at 0\r\ninterval = 0 # intervals started at 0\r\n#take_data = False # taking data set to false, not needed but was used with pyserial so was put here at first\r\n\r\n#app = QtCore.QCoreApplication([]) \r\napp = QtWidgets.QApplication([]) # qapp initialization \r\nprint('Connecting to Arduino') # print out\r\n\r\nserial_port = QtSerialPort.QSerialPort('COM4') # set the com port and create serial port instance\r\n\r\nserial_port.open(QtCore.QIODevice.ReadWrite) # open serial port to read and write\r\n\r\nserial_port.setDataTerminalReady(1) # set dtr to 1,0, then 1\r\nserial_port.setDataTerminalReady(0)\r\nserial_port.setDataTerminalReady(1)\r\n\r\n\r\ndef handle_ready_read(): # slot function handle readyread signal\r\n global counter_comm # made global so can be read in and out of function\r\n global replica\r\n global interval\r\n #global take_data\r\n while serial_port.canReadLine(): # while a readline is available\r\n try: # try if no 
exception\r\n if verbose: print('Waiting for response...') # print if verbose\r\n resp = serial_port.readLine().data().decode('ISO-8859-1').replace('\\n','').replace('\\r','').strip() # readline from serial port, get data, decode single bytes with latin-1, get rid of \\n, \\r and spaces \r\n if verbose: print('Got response: ' + resp) # print resp if verbose\r\n if resp == \"Overrun\": # if resp is overrun\r\n serial_port.close() # close port\r\n raise RuntimeError(\"Arduino reports overrun\") # this could be tested, raise error\r\n app.quit() # quit pyqt eventloop\r\n #break\r\n if resp == 'Geiger 2018': # if resp is this\r\n print('Arduino is communicating') # print out\r\n counter_comm = len(counter) # set counter_comm to length of counter so can wait a few passes to take data\r\n #take_data = True # set to true, here if needed for some reason\r\n if len(counter) == counter_comm + 3:# and take_data == True: # if 3 have pass, start taking data\r\n print('Starting data collection') # print out\r\n if len(counter) > counter_comm + 3:# and take_data == True: # take data after more than 3 passes\r\n interval_nums.append(int(resp)) # add interval as int\r\n print(f\"Replica # {replica+1}. Interval # {interval+1}. Interval length received: {resp}\\n\") # print info\r\n intervals[replica,interval] = interval_nums[(replica+1)*(interval+1)-1] # fill intervals array with intervals\r\n interval += 1 # increment interval\r\n if len(interval_nums) == (replica+1)*intervalNum: # if length of intervalnum has been reached\r\n replica += 1 # increment replica\r\n interval = 0 # reset interval to 0\r\n if len(interval_nums) == (intervalNum * replicaNum): # if total data has been taken\r\n print('Intervals:', interval_nums) # print intervals\r\n serial_port.close() # closer serial port\r\n app.quit() # quit pyqt eventloop\r\n #break\r\n counter.append(1) # add to counter\r\n\r\n except ValueError as e: # if readline gives error\r\n print(\"error\", e) # print error\r\n\r\n\r\nserial_port.readyRead.connect(handle_ready_read) # signal for readyread to be handled\r\nbuf = serial_port.clear() # clear the buffer \r\nif verbose: print(f'Values in buffer cleared: {buf} ') # print amount from cleared buffer\r\n#app.setQuitOnLastWindowClosed(True) # could be needed \r\napp.exec_() # run pyqt eventloop\r\n\r\n\r\nfor i in range(replicaNum): # Histogram graphing section, to visually check if the data is decent.\r\n plt.hist(intervals[i,:],) # plot histograms of intervals per replica\r\n plt.title(f\"Replica #{i+1}\") # print title of replicas\r\n\r\n# Save the file and close the serial device\r\nif save_data: # if save data true\r\n fileName = datetime.datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S\") + \"_int\" + str(intervalNum) + \"_rep\" + str(replicaNum) + \"_DWELL_TIME_DATA.csv\" # create save data filename\r\n print(\"Data saved as \", fileName) # print file saved\r\n np.savetxt(fileName, intervals, delimiter =\",\") # save file\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Time_timer_nogui.py","file_name":"Time_timer_nogui.py","file_ext":"py","file_size_in_byte":4869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"407269370","text":"from tflite_runtime.interpreter import Interpreter\nimport numpy as np\nimport argparse\nfrom PIL import Image\n\nparser = argparse.ArgumentParser(description='Fingerprint Classification')\nparser.add_argument('--filename', type=str, help='Specify the filename', required=True)\nparser.add_argument('--model_path', 
type=str, help='Specify the model path', required=True)\nparser.add_argument('--label_path', type=str, help='Specify the label map', required=True)\nparser.add_argument('--top_k', type=int, help='How many top results', default=3)\n\nargs = parser.parse_args()\n\nfilename = args.filename\nmodel_path = args.model_path\nlabel_path = args.label_path\ntop_k_results = args.top_k\n\nwith open(label_path, 'r') as f:\n    labels = list(map(str.strip, f.readlines()))\n\n# Load TFLite model and allocate tensors\ninterpreter = Interpreter(model_path=model_path)\ninterpreter.allocate_tensors()\n\n# Get input and output tensors.\ninput_details = interpreter.get_input_details()\noutput_details = interpreter.get_output_details()\n\n# Read image\nimg = Image.open(filename).convert('RGB')\n\n# Get input size\ninput_shape = input_details[0]['shape']\nsize = input_shape[:2] if len(input_shape) == 3 else input_shape[1:3]\n\n# Preprocess image\nimg = img.resize(size)\nimg = np.array(img, dtype=np.float32)\nimg = img / 255.0\n\n# Add a batch dimension\ninput_data = np.expand_dims(img, axis=0)\n\n# Point the data to be used for testing and run the interpreter\ninterpreter.set_tensor(input_details[0]['index'], input_data)\ninterpreter.invoke()\n\n# Obtain results and map them to the classes\npredictions = interpreter.get_tensor(output_details[0]['index'])\npredicted_label = np.argmax(predictions)\nprint(labels[predicted_label])\n","sub_path":"piFingerprint.py","file_name":"piFingerprint.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"461351290","text":"import random\nimport time\n\n##\ndef bubbleSort(ary):\n    n = len(ary)\n    for end in range(n-1, 0, -1):\n        noChange = True\n        for cur in range(0, end):\n            if ary[cur] > ary[cur+1]:\n                ary[cur], ary[cur+1] = ary[cur+1], ary[cur]\n                noChange = False\n        if noChange:\n            break\n    return ary\n\ndef qSort(arr, start, end):\n    if end <= start:\n        return\n\n    low = start\n    high = end\n\n    pivot = arr[(low + high)//2]\n    while low <= high:\n        while arr[low] < pivot:\n            low += 1\n        while arr[high] > pivot:\n            high -= 1\n        if low <= high:\n            arr[low], arr[high] = arr[high], arr[low]\n            low, high = low+1, high-1\n\n    mid = low\n\n    qSort(arr, start, mid-1)\n    qSort(arr, mid, end)\n\ndef quickSort(ary):\n    qSort(ary, 0, len(ary)-1)\n\n##\ntempAry = [random.randint(10000, 99999) for _ in range(1000000)]\ntempAry.sort()\n\nrandomPos = random.randint(0, len(tempAry)-1)\nprint('Number of items : 1000000')\nprint('Inserted position :', randomPos)\n\nbubAry = tempAry[:]\nquickAry = tempAry[:]\n\nstart = time.time()\nbubbleSort(bubAry)\nend = time.time()\nprint('Bubble sort time :', end-start)\n\nstart = time.time()\nquickSort(quickAry)\nend = time.time()\nprint('Quick sort time :', end-start)\n","sub_path":"12-99-exam-02-01-my.py","file_name":"12-99-exam-02-01-my.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"165371244","text":"#!/usr/bin/env python3.9\n\nimport sys\n\nros_path = '/opt/ros/kinetic/lib/python2.7/dist-packages'\n\nimport rospy\nfrom brendan_ur5e.msg import image_data\nimport std_msgs\n\nif ros_path in sys.path:\n    sys.path.remove(ros_path)\n    \nimport pyrealsense2.pyrealsense2 as rs\nimport numpy as np\nimport cv2\n\nsys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages')\n\n# Configure depth and color streams\npipeline = rs.pipeline()\nconfig = rs.config()\nconfig.enable_device('841612070149') #right side of table (desk side)\n\n# 
Get device product line for setting a supporting resolution\npipeline_wrapper = rs.pipeline_wrapper(pipeline)\npipeline_profile = config.resolve(pipeline_wrapper)\ndevice = pipeline_profile.get_device()\ndevice_product_line = str(device.get_info(rs.camera_info.product_line))\n\nfound_rgb = False\nfor s in device.sensors:\n print(s.get_info(rs.camera_info.name))\nfor s in device.sensors:\n if s.get_info(rs.camera_info.name) == 'RGB Camera':\n found_rgb = True\n break\nif not found_rgb:\n print(\"The demo requires Depth camera with Color sensor\")\n exit(0)\n\n#align_to = rs.stream.color\n#align = rs.align(align_to)\n\n#config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)\nconfig.enable_stream(rs.stream.depth)\nconfig.enable_stream(rs.stream.color)\n\n#if device_product_line == 'L500':\n# config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)\n#else:\n# config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)\n\n# Start streaming\npf = pipeline.start(config)\nprint(f\"device: {pf.get_device()}\")\nprint(f\"depth_sensor: {pf.get_device().first_depth_sensor()}\")\nprint(f\"depth_scale: {pf.get_device().first_depth_sensor().get_depth_scale()}\")\nprint(f\"streams: {pf.get_streams()}\")\n\n\n\nrospy.init_node('cam1_pub', anonymous=True)\npub_rgb = rospy.Publisher('/camera1/rgb/image_muddy', image_data, queue_size=1)\npub_depth = rospy.Publisher('/camera1/depth/image_muddy', image_data, queue_size=10)\n#bridge = CvBridge()\n\nrate = rospy.Rate(30)\ni = 0\ntry:\n while not rospy.is_shutdown():\n \n # Wait for a coherent pair of frames: depth and color\n frames = pipeline.wait_for_frames()\n #print('got frames')\n #aligned_frames = align.process(frames)\n #print('aligned frames')\n #depth_frame = aligned_frames.get_depth_frame()\n #color_frame = aligned_frames.get_color_frame()\n #print('divided frames')\n depth_frame = frames.get_depth_frame()\n color_frame = frames.get_color_frame()\n if not depth_frame or not color_frame:\n continue\n\n # Convert images to numpy arrays\n depth_image = np.asanyarray(depth_frame.get_data())\n depth_image = cv2.convertScaleAbs(depth_image, alpha=0.03)\n color_image = np.asanyarray(color_frame.get_data())\n\n depth_colormap_dim = depth_image.shape\n color_colormap_dim = color_image.shape\n \n color_img_msg = image_data(timestamp=color_frame.timestamp, height=color_colormap_dim[0], width=color_colormap_dim[1], length=color_colormap_dim[2], encoding=\"rgb8\", data=color_image.flatten().tolist())\n #color_img_msg = image_data(data=color_image.flatten().tolist())\n \n depth_img_msg = image_data(timestamp=depth_frame.timestamp, height=depth_colormap_dim[0], width=depth_colormap_dim[1], length=1, encoding=\"mono8\", data=depth_image.flatten().tolist())\n \n pub_rgb.publish(color_img_msg)\n pub_depth.publish(depth_img_msg)\n i = i + 1\n rate.sleep()\n \nfinally:\n\n # Stop streaming\n pipeline.stop()\n","sub_path":"brendan_ur5e/src/scripts/cam1_pub.py","file_name":"cam1_pub.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"191139425","text":" \n #This example uses Python 2.7 and the python-request library.\n\nfrom requests import Request, Session\nfrom requests.exceptions import ConnectionError, Timeout, TooManyRedirects\nimport json\nimport config\nfrom prettytable import PrettyTable\nfrom colorama import Fore, Back, Style\n\nurl = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'\nparameters = {\n #'id': '1'\n 
#'symbol':'ETH,BTG,ETC,CLO,RVN,ZEL,BEAM,CKB,GRIN,ETP,ZCL,BTCZ,MOAC,ZEC,PIRL,XMR,EXP,AE,ZEN'\n 'start':'1',\n 'limit':'2200',\n 'convert':'USD'\n}\nheaders = {\n 'Accepts': 'application/json',\n 'X-CMC_PRO_API_KEY': config.key,\n}\n\nsession = Session()\nsession.headers.update(headers)\ntable = PrettyTable()\n\nsymbol_name = [\"XZC\",\"ETH\", \"BTG\", \"ETC\", \"CLO\", \"RVN\", \"ZEL\", \"BEAM\", \"CKB\", \"GRIN\", \"ETP\", \"ZCL\", \"BTCZ\", \"MOAC\", \"ZEC\", \"PIRL\", \"XMR\", \"EXP\", \"AE\", \"ZEN\"]\n\nresponse = session.get(url, params=parameters)\ndata = json.loads(response.text)\nfor currency in data['data']:\n rank = currency['cmc_rank']\n name = currency['name']\n symbol = currency['symbol']\n market_cap = currency['quote']['USD']['market_cap']\n hour_change = currency['quote']['USD']['percent_change_1h']\n day_change = currency['quote']['USD']['percent_change_24h']\n week_change = currency['quote']['USD']['percent_change_7d']\n price = currency['quote']['USD']['price']\n volume = currency['quote']['USD']['volume_24h']\n\n if hour_change is not None:\n if hour_change > 0:\n hour_change = Back.GREEN + str(hour_change) + '%' + Style.RESET_ALL\n else:\n hour_change = Back.RED + str(hour_change) + '%' + Style.RESET_ALL\n\n if day_change is not None:\n if day_change > 0:\n day_change = Back.GREEN + str(day_change) + '%' + Style.RESET_ALL\n else:\n day_change = Back.RED + str(day_change) + '%' + Style.RESET_ALL\n\n if week_change is not None:\n if week_change > 0:\n week_change = Back.GREEN + str(week_change) + '%' + Style.RESET_ALL\n else:\n week_change = Back.RED + str(week_change) + '%' + Style.RESET_ALL\n\n if volume is not None:\n volume_string = '{:,}'.format(volume)\n\n if market_cap is not None:\n market_cap_string = '{:,}'.format(market_cap)\n\n if symbol in symbol_name: \n table.add_row([rank, name + ' ( ' + symbol + ')', '$' + str(price), '$' + str(market_cap), \n '$' + volume_string, str(hour_change), str(day_change), str(week_change)])\n\ntable.field_names = [\"Rank\",\"Name(Symbol)\",\"Price\",\"Market Cap\", \"Volume\", \"Hour Change\", \"Day Change\", \"Week Change\"]\ntable.sortby = \"Day Change\"\ntable.reversesort = True\nprint(table)\n\n ","sub_path":"py/cmcapi/start3.py","file_name":"start3.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"492901517","text":"import signal\n\n\nasync def cancel_on_controlC(nursery):\n def signal_handler(sig, frame):\n print('You pressed Ctrl+C!')\n nursery.cancel_scope.cancel()\n\n signal.signal(signal.SIGINT, signal_handler)\n\n\n'''\n# how to use:\n\nasync def test_cntl():\n async with trio.open_nursery() as nursery:\n await cancel_on_cntl_c(nursery)\n await trio.sleep(9999)\n'''","sub_path":"trio_util/trio_util/nursery_utils.py","file_name":"nursery_utils.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"87339444","text":"import random\nclass Config(object):\n \"\"\"This is a description of Config\"\"\"\n BESTBUY_API_PATH = \"../bestbuy_apikey.txt\"\n BESTBUY_URL_BASE = \"https://api.bestbuy.com/v1/products\"\n BESTBUY_MAX_RESULTS = 100\n BESTBUY_RESP_FILTER = ['name', 'salePrice']\n\n BESTBUY_CATPATH = \"(platform=Xbox%20One\" \\\n \"|(platform=psp\" \\\n \"|(platform=PlayStation%204\" \\\n \"|(platform=Nintendo%203DS\" \\\n \"|(platform=Wii%20U\" \\\n \")))))\"\n AMAZON_SEARCH_URL_BASE = 
\"https://www.amazon.com/s/ref=nb_sb_noss?field-keywords=\"\n AMAZON_SEARCH_PIVOT = \"s-color-twister-title-link\"\n AMAZON_OTHERSELLERS_URL_BASE = \"https://www.amazon.com/gp/offer-listing/\"\n AMAZON_OTHERSELLERS_URL_ARGS = \"/ref=dp_olp_all_mbc?ie=UTF8&f_all=true&f_new=true&sort=taxsip\"\n AMAZON_PRODUCTLINK_URL_PIVOT = \"/dp/\"\n AMAZON_SELLERRANK_PIVOT = \"in Video Games\"\n\n AMAZON_OTHERSELLERS_PROPRIETARY_TAG = \"alt=\\\"Amazon.com\\\"\"\n AMAZON_OTHERSELLERS_SELLERINFO_PIVOT = \"olpSellerName\"\n AMAZON_OTHERSELLERS_SELLERINFO_END_PIVOT = \"\"\n AMAZON_OTHERSELLERS_OFFERPRICE_PIVOT = \"olpOfferPrice\"\n AMAZON_OTHERSELLERS_SHIPPINGPRICE_PIVOT = \"olpShippingPrice\"\n AMAZON_OTHERSELLERS_TAXPRICE_PIVOT = \"olpEstimatedTaxText\"\n\n REQ_HEADERS_LIST = [\n {\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n \"Accept-Encoding\": \"gzip, deflate, sdch, br\",\n \"Accept-Language\": \"en-US,en;q=0.8\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36\",\n },\n {\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n \"Accept-Encoding\": \"gzip, deflate, sdch, br\",\n \"Accept-Language\": \"en-US,en;q=0.8\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0\",\n },\n {\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n \"Accept-Encoding\": \"gzip, deflate, sdch, br\",\n \"Accept-Language\": \"en-US,en;q=0.8\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393\"\n },\n {\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n \"Accept-Encoding\": \"gzip, deflate, sdch, br\",\n \"Accept-Language\": \"en-US,en;q=0.8\",\n \"User-Agent\": \"Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko\"\n },\n {\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n \"Accept-Encoding\": \"gzip, deflate, sdch, br\",\n \"Accept-Language\": \"en-US,en;q=0.8\",\n \"User-Agent\": \"Mozilla/5.0 (Linux; Android 6.0.1; SAMSUNG SM-N910F Build/MMB29M) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/4.0 Chrome/44.0.2403.133 Mobile Safari/537.36\"\n }\n ]\n\n @staticmethod\n def get_next_header():\n index = random.randint(0, len(Config.REQ_HEADERS_LIST) - 1)\n head = Config.REQ_HEADERS_LIST[index]\n return head\n\n @staticmethod\n def readBestbuyAPIKey():\n file = open(Config.BESTBUY_API_PATH, 'r')\n key = file.read().strip()\n return key","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"196347680","text":"import unittest\n\nimport numpy as np\n\nfrom lp import GameAgent\n\n\nclass TestRPS(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.agent = GameAgent()\n\n def test_case_1(self):\n R = [\n [0, 1, -1], [-1, 0, 1], [1, -1, 0]\n ]\n\n np.testing.assert_almost_equal(\n self.agent.solve(R),\n np.array([0.333, 0.333, 0.333]),\n decimal=3\n )\n\n def test_case_2(self):\n R = [[0, 2, -1],\n [-2, 0, 1],\n [1, -1, 0]]\n\n np.testing.assert_almost_equal(\n self.agent.solve(R),\n np.array([0.250, 0.250, 0.500]),\n decimal=3\n )\n\n\nunittest.main(argv=[''], verbosity=2, 
exit=False)\n","sub_path":"testGameAgent.py","file_name":"testGameAgent.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"66876536","text":"from django.urls import path\nfrom webapp.views import BookListView, UserView, BookDetailView, BookDeleteView, BookCreateView, AuthorCreateView, AuthorDetailView, AuthorListView, BookUpdateView, AuthorDeleteView, AuthorUpdateView\n\napp_name = 'webapp'\n\nurlpatterns = [\n path('', BookListView.as_view(), name='book_list'),\n path('book/', BookDetailView.as_view(), name='book_detail'),\n path('book/', BookUpdateView.as_view(), name='book_update'),\n path('book/create/', BookCreateView.as_view(), name='book_create'),\n path('book/', BookDeleteView.as_view(), name='book_delete'),\n path('authors/', AuthorUpdateView.as_view(), name='author_update'),\n path('authors/', AuthorDeleteView.as_view(), name='author_delete'),\n path('users/', UserView.as_view(), name='user_info'),\n path('authors/', AuthorDetailView.as_view(), name='author_detail'),\n path('authors/create/', AuthorCreateView.as_view(), name='author_create'),\n path('authors/', AuthorListView.as_view(), name='author_list')\n]","sub_path":"library/webapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"508841563","text":"# Time: O(n)\n# Space: O(1)\n\n'''\nGiven a non-negative number represented as an array of digits, plus one to the number.\n\nThe digits are stored such that the most significant digit is at the head of the list.\n'''\n\nclass Solution(object):\n def plusOne(self, digits):\n \"\"\"\n :type digits: List[int]\n :rtype: List[int]\n \"\"\"\n if len(digits) == 0: return [1]\n \n if digits[-1] != 9: \n digits[-1] += 1\n return digits\n else:\n index = 1\n while digits[-index] == 9 and index < len(digits):\n index += 1\n \n if index == len(digits) and digits[-index] == 9:\n digits[0] = 1\n digits[1:] = [0] * (len(digits) - 1) + [0]\n else:\n digits[-index] += 1\n digits[-index+1:] = [0] * (index - 1)\n \n return digits\n\n\n def plusOne_2(self, digits):\n \"\"\"\n :type digits: List[int]\n :rtype: List[int]\n \"\"\"\n carry = 1\n\n for index in range(len(digits), 0, -1):\n digits[index-1] += carry # 9+1 = 10 but we only 1 digit on digits[index-1]\n carry = digits[index-1] / 10 # decide if the case is 9+1 or any+1\n digits[index-1] %= 10 # redefine digits[index-1] as only 1 digit\n\n if carry == 0:\n break\n \n if carry:\n digits = [1] + digits\n \n return digits","sub_path":"plus-one.py","file_name":"plus-one.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"594561645","text":"# -*- coding: utf-8 -*-\nimport logging\n\nfrom django.views.decorators.http import require_POST, require_GET\n\nfrom luckycommon.utils.api import token_required\n\nfrom luckycommon.utils.decorator import response_wrapper\nfrom luckycommon.level import handler as level_handler\nfrom luckycommon.utils.exceptions import ParamError\n\n_LOGGER = logging.getLogger('lucky')\n\n\n@require_GET\n@response_wrapper\n@token_required\ndef get_my_level(req):\n return level_handler.get_level_dict(int(req.user_id))\n\n\n@require_POST\n@response_wrapper\n@token_required\ndef activate_coupon(req):\n try:\n level = int(req.POST.get('level'))\n except:\n raise ParamError('parameter `level` invalid')\n if 
level_handler.is_coupon_available(int(req.user_id), level):\n        level_handler.activate_coupon(int(req.user_id), level)\n        return {}\n    else:\n        raise ParamError('coupon not available')\n\n\n@require_GET\n@response_wrapper\ndef get_level_rank(req):\n    rank_list = level_handler.get_rank()\n    return {'list': rank_list,\n            'count': len(rank_list)}\n\n\n@require_GET\n@response_wrapper\ndef get_level_week_rank(req):\n    year = req.GET.get('year')\n    week = req.GET.get('week')\n    if week and year:\n        rank_list = level_handler.get_week_rank(int(year), int(week))\n    else:\n        rank_list = level_handler.get_current_week_rank()\n    return {'list': rank_list,\n            'count': len(rank_list)}\n\n\n@require_GET\n@response_wrapper\ndef get_level_week_rank_list(req):\n    rank_list = level_handler.get_week_rank_list()\n    return {'list': rank_list,\n            'count': len(rank_list)}\n","sub_path":"luckyapi/views/level.py","file_name":"level.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"221532497","text":"import os\n\nfrom flask import Blueprint, render_template, Markup, request\nfrom werkzeug.exceptions import HTTPException, NotFound, InternalServerError\n\nfrom docs.services import site, index\n\n\nblueprint = Blueprint('docs', __name__, url_prefix='')\n\n\n@blueprint.route('/search', methods=['GET'])\ndef search():\n    \"\"\"Handle a search request.\"\"\"\n    q = request.args.get('q')\n    results = index.find(q)\n    return render_template('docs/search.html', results=results, q=q)\n\n\n@blueprint.route('/', defaults={'path': ''})\n@blueprint.route('/<path:path>')\ndef from_sitemap(path: str):\n    \"\"\"Route the request dynamically, based on the site map.\"\"\"\n    try:\n        page = site.load_page(path)\n    except site.PageNotFound as e:\n        raise NotFound('Nope') from e\n    except site.PageLoadFailed as e:\n        raise InternalServerError('Nope') from e\n    context = dict(page=page, content=Markup(page.markdown),\n                   pagetitle=page.title)\n    return render_template('docs/page.html', **context)\n","sub_path":"docs/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"214914934","text":"import sys\nimport os\nfrom PyQt5.QtWidgets import QWidget, QApplication, QMainWindow, \\\n    QVBoxLayout, QPushButton, QComboBox, QHBoxLayout, QLabel, QLineEdit, QGroupBox, QCheckBox, QMenu, QTabWidget\nfrom PyQt5.QtCore import QSize, QSettings, QObject, pyqtSignal\nfrom ServersTable import ServersTable\nfrom ConnectionPanel import ConnectionPanel\n\nSETTINGS_PATH = os.path.join(os.getcwd(), \"settings.ini\")\n\n\nclass ChangeTab(QObject):\n    tabChanged = pyqtSignal()\n\n\nclass MainWindowProject(QMainWindow):\n    def __init__(self, first_time):\n        super().__init__()\n        # Window settings\n        self.setFixedSize(QSize(480, 360))\n        self.setWindowTitle(\"RDP/VDP\")\n        if first_time:\n            with open(SETTINGS_PATH, 'w') as first_settings:\n                first_settings.write('[General]\\nlast_login = \\n')\n                first_settings.write('\\n')\n                first_settings.write('[Login]\\nlast_login=\\n')\n                first_settings.write('[Servers]\\ntest_server=add.your.server')\n        self.mainWidget = MainWidget(self)\n        self.setCentralWidget(self.mainWidget)\n\n        self.show()\n\n\nclass MainWidget(QWidget):\n    def __init__(self, parent,):\n        super(QWidget, self).__init__(parent)\n        self.layout = QVBoxLayout(self)\n\n        self.ini = QSettings(SETTINGS_PATH, QSettings.IniFormat)\n        self.ini.setIniCodec(\"utf-8\")\n        # tabs for application\n        self.tabs = 
QTabWidget()\n        self.mainTab = ConnectionPanel(settings=SETTINGS_PATH)\n        self.serversTab = ServersTable(settings=SETTINGS_PATH)\n        self.tabs.resize(300, 200)\n        # Add tabs\n        self.tabs.addTab(self.mainTab, \"Main\")\n        self.tabs.addTab(self.serversTab, \"Servers\")\n        self.tabs.currentChanged.connect(self.tab_changed)\n    \n        self.layout.addWidget(self.tabs)\n        self.setLayout(self.layout)\n\n    def tab_changed(self):\n        if self.tabs.currentWidget() == self.mainTab:\n            if self.serversTab.save_settings_change_tab() == \"Value Error\":\n                self.tabs.setCurrentIndex(2)\n            self.mainTab.reload_servers()\n\n\nif __name__ == '__main__':\n    if not os.path.exists(SETTINGS_PATH):\n        first_time = True\n    else:\n        first_time = False\n    app = QApplication(sys.argv)\n    window = MainWindowProject(first_time)\n    sys.exit(app.exec_())","sub_path":"MainWindowProject.py","file_name":"MainWindowProject.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"649838642","text":"from functools import wraps\nfrom flask import current_app as app\nfrom flask_restful import Resource\n\n\ndef sanitize_response(response):\n    '''\n    this method will unpack response to separate out headers,\n    status and response body.\n    '''\n    data = None\n    status = 200\n    headers = {}\n    if isinstance(response, tuple) and len(response) == 3:\n        (data, status, headers) = response\n    else:\n        if 'status' in response and type(response['status']) == int:\n            status = response['status']\n        elif 'errorCode' in response:\n            status = response['errorCode']\n        data = response\n    return (data, status, headers)\n\n\ndef patch_response_data(func):\n    '''\n    This will patch response in standard format.\n    so that every time no need to create response format in api code\n    '''\n    @wraps(func)\n    def wrapper(*args, **kwargs):\n        data, status, headers = sanitize_response(func(*args, **kwargs))\n        patched = isinstance(data, dict) and (\n            \"errorCode\" in data or \"responseData\" in data\n        )\n\n        if not patched:\n            data = {\n                \"responseData\": data\n            }\n\n        return (data, status, headers)\n    return wrapper\n\n\nclass Resource(Resource):\n    '''\n    This class is overridden using flask resource file.\n    So that we can add our custom functions into it.\n    Derive any api class from this class.\n    '''\n    def options(self, *args, **kwargs):\n        return \"OK\"\n\n    options.authenticated = False\n    method_decorators = [patch_response_data]\n","sub_path":"app/utils/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"117803225","text":"import cv2\nimport numpy as np\nimport PlantDetectHelper as pdh\n\n# Read the dandelion image\ndef detectColorDandelion(img):\n    imgContour = img.copy()\n\n    colorImg = pdh.applyHSV(img, 0, 37, 123, 255, 206, 255)\n\n    imgGray = cv2.cvtColor(colorImg,cv2.COLOR_BGR2GRAY)\n    imgBlur = cv2.GaussianBlur(imgGray,(7,7),1)\n    imgCanny = cv2.Canny(imgBlur,50,50)\n    pdh.getContours(imgCanny, imgContour)\n\n    imgBlank = np.zeros_like(img)\n    imgStack = pdh.stackImages(4, [imgContour])\n\n    cv2.imshow(\"Stack\", imgStack)\n\n    cv2.waitKey(0)\n\ndef main():\n    img = cv2.imread(\"Resources/Dandelion_Field.jfif\")\n    detectColorDandelion(img)\n\nif __name__ == \"__main__\": main()\n","sub_path":"PlantDetector/PlantDetector.py","file_name":"PlantDetector.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
+{"seq_id":"322198415","text":"\"\"\"\nThis file provides functionality of remotely controlling the content on the wall\nby command line.\n\"\"\"\n\nimport argparse\n\n\nimport Pyro4\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-f\", \"--func_name\", type=str, required=True, help=\"name of the funtion to be run\")\nap.add_argument(\"-n\", \"--name\", type=str, required=True, help=\"the name of the widget\")\nap.add_argument(\"-x\", \"--xpos\", type=int, help=\"x coordinate of the new position\")\nap.add_argument(\"-y\", \"--ypos\", type=int, help=\"y coordinate of the new position\")\nap.add_argument(\"-w\", \"--width\", type=int, help=\"new width of the widget\")\nap.add_argument(\"-ht\", \"--height\", type=int, help=\"new height of the widget\")\nap.add_argument(\"-u\", \"--uri\", type=str, help=\"uri of the source\")\nap.add_argument(\"-s\", \"--screen\", type=str, required=True, help=\"IP address of the screen\")\nap.add_argument(\"-cl\", \"--crop_left\", type=int, help=\"Crop left as pixel\")\nap.add_argument(\"-cr\", \"--crop_right\", type=int, help=\"Crop right as pixel\")\nap.add_argument(\"-ct\", \"--crop_top\", type=int, help=\"Crop top as pixel\")\nap.add_argument(\"-cb\", \"--crop_bottom\", type=int, help=\"Crop bottom as pixel\")\n\n\nargs = vars(ap.parse_args())\n\n\nfunction_name = args[\"func_name\"]\nscreen_ip = args[\"screen\"]\nscreen = Pyro4.Proxy(\"PYRONAME:{}\".format(screen_ip)) # bu komutun çalışacağı ekranı temsil ediyor.\n\n\ndef move_to(args):\n \"\"\"\n python remote_machine_client.py -n\"local video\" -f=\"move_to\" -s=192.168.1.35 -x=600 -y=400\n :param args:\n :return:\n \"\"\"\n name = args[\"name\"]\n xpos = args[\"xpos\"]\n ypos = args[\"ypos\"]\n screen.move_widget(name=name, xpos=xpos, ypos=ypos)\n\n\ndef resize(args):\n \"\"\"\n python remote_machine_client.py -n\"local video\" -f=\"resize\" -s=192.168.1.35 -w=600 -ht=400\n :param args:\n :return:\n \"\"\"\n name = args[\"name\"]\n width = args[\"width\"]\n height = args[\"height\"]\n screen.resize(name=name, width=width, height=height)\n\n\ndef remove(args):\n \"\"\"\n sample usage:\n python remote_machine_client.py -n\"local video\" -f=\"remove\" -s=192.168.1.35\n :param args:\n :return:\n \"\"\"\n name = args[\"name\"]\n screen.remove_widget(name=name)\n\n\ndef add_source(args):\n \"\"\"\n sample usages:\n\n python remote_machine_client.py -f=\"add_source\" \\\n -n=\"rtsp source\" -u=rtsp://10.0.0.143/media/video1 \\\n -x=640 -y=0 -w=1280 -ht=720 -s=10.0.0.30\n\n\n python remote_machine_client.py -n\"local video\" \\\n -u=\"file:///home/kemal/Developer/vdwll/media/brbad.mp4\" \\\n -x=100 -y=100 -w=1000 -ht=400 -f=\"add_source\" \\\n -s=192.168.1.35\n\n python remote_machine_client.py -n\"local video\" \\\n -u=\"https://www.freedesktop.org/software/gstreamer-sdk/data/media/sintel_trailer-480p.webm\" \\\n -x=100 -y=100 -w=800 -ht=400 -f=\"add_source\" \\\n -s=192.168.1.35\n\n python remote_machine_client.py -n\"local video\" \\\n -u=\"rtsp://10.0.0.143/media/video1\" \\\n -x=100 -y=100 -w=800 -ht=400 -f=\"add_source\" \\\n -s=10.0.0.27\n\n rtsp://78.188.204.20/media/video1\n rtsp://10.0.0.143/media/video1\n\n :param args:\n :return:\n \"\"\"\n name = args[\"name\"]\n uri = args[\"uri\"]\n xpos = args[\"xpos\"]\n ypos = args[\"ypos\"]\n width = args[\"width\"]\n height = args[\"height\"]\n screen.add_source(name=name, uri=uri, xpos=xpos, ypos=ypos, width=width, height=height)\n\n\ndef change_mod_queue(args):\n \"\"\"\n sample usage:\n 
python remote_machine_client.py -n\"local video\" -f=\"change_mod_queue\" -s=10.0.0.27 \\\n -w=1920 -ht=1080 -ct=0 -cr=300 -cl=300 -cb=0\n :param args:\n :return:\n \"\"\"\n name = args[\"name\"]\n width = args[\"width\"]\n height = args[\"height\"]\n crop_left = args[\"crop_left\"]\n crop_right = args[\"crop_right\"]\n crop_top = args[\"crop_top\"]\n crop_bottom = args[\"crop_bottom\"]\n\n print(\"crop_left\", crop_left,\n \"crop_right\", crop_right,\n \"crop_top\", crop_top,\n \"crop_bottom\", crop_bottom)\n\n screen.change_mod_queue(name=name,\n width=width,\n height=height,\n crop_left=crop_left,\n crop_right=crop_right,\n crop_top=crop_top,\n crop_bottom=crop_bottom\n )\n\n\nlocals()[function_name](args)\n\nprint(\"done!\")\n","sub_path":"src/remote_machine_client.py","file_name":"remote_machine_client.py","file_ext":"py","file_size_in_byte":4339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"229143557","text":"'''\nAuthor: pw897\n\nThis module has a detect function that takes in an image and\nreturns coordinates of rectangles that contain a bug.\n\n'''\nimport os\nfrom PIL import Image\nimport numpy as np\n\n#Returns the square distance\ndef distS3(x1,y1,z1,x2,y2,z2):\n\treturn (x2-x1)**2 + (y2-y1)**2 + (z2-z1)**2\n\ndef detect(im):\n\tminx = 100000000\n\tminy = 100000000\n\tmaxx = 0\n\tmaxy = 0\n\n\t(height,width) = im.shape[0:2]\n\tprint(height,width)\n\tx = 0\n\twhile(x < width):\n\t\tfor y in range(0,height,2):\n\t\t\tpixel = im[y][x]\n\t\t\tred = pixel[0]\n\t\t\tgreen = pixel[1]\n\t\t\tblue = pixel[2]\n\t\t\td = distS3(red,green,blue,8,81,76)\n\t\t\t#print(d)\n\t\t\tif(d < 5250):\n\t\t\t\tif(x < minx):\n\t\t\t\t\tminx = x\n\t\t\t\telif(x > maxx):\n\t\t\t\t\tmaxx = x\n\t\t\t\tif(y < miny):\n\t\t\t\t\tminy = y\n\t\t\t\telif(y > maxy):\n\t\t\t\t\tmaxy = y\n\t\t\t\tcount = 1\n\t\t\telse:\n\t\t\t\tcount = 10\n\t\tx = x + count\n\treturn [minx,miny,maxx,maxy]\n","sub_path":"Software/bugDetectFast.py","file_name":"bugDetectFast.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"470088875","text":"from include import *\n\n#https://github.com/marvis/pytorch-yolo2/blob/master/FocalLoss.py\n#https://github.com/unsky/focal-loss\nclass FocalLoss2d(nn.Module):\n\n def __init__(self, gamma=2):\n super(FocalLoss2d, self).__init__()\n self.gamma = gamma\n\n def forward(self, logit, target, class_weight=None, type='softmax', is_average=True):\n batch_size,C,H,W = logit.size()\n target = target.view(-1, 1).long()\n\n\n if type=='sigmoid':\n if class_weight is None:\n class_weight = [1]*2 #[0.5, 0.5]\n\n prob = F.sigmoid(logit)\n prob = prob.view(-1, 1)\n prob = torch.cat((1-prob, prob), 1)\n select = torch.FloatTensor(len(prob), 2).zero_().cuda()\n select.scatter_(1, target, 1.)\n\n elif type=='softmax':\n if class_weight is None:\n class_weight =[1]*C #[1/C]*C\n\n logit = logit.permute(0, 2, 3, 1).contiguous().view(-1, C)\n prob = F.softmax(logit,1)\n select = torch.FloatTensor(len(prob), C).zero_().cuda()\n select.scatter_(1, target, 1.)\n\n class_weight = torch.FloatTensor(class_weight).cuda().view(-1,1)\n class_weight = torch.gather(class_weight, 0, target)\n\n prob = (prob*select).sum(1).view(-1,1)\n prob = torch.clamp(prob,1e-6,1-1e-6)\n loss = - class_weight *(torch.pow((1-prob), self.gamma))*prob.log()\n\n loss = loss.view(batch_size,-1).mean(1)\n\n if is_average:\n loss = loss.mean()\n\n return loss\n\n##------------\n\nclass 
RobustFocalLoss2d(nn.Module):\n #assume top 10% is outliers\n def __init__(self, gamma=2, size_average=True):\n super(RobustFocalLoss2d, self).__init__()\n self.gamma = gamma\n self.size_average = size_average\n\n\n def forward(self, logit, target, class_weight=None, mode='softmax', limit=2):\n self.limit = limit\n target = target.view(-1, 1).long()\n\n if mode=='sigmoid':\n if class_weight is None:\n class_weight = [1]*2 #[0.5, 0.5]\n\n prob = F.sigmoid(logit)\n prob = prob.view(-1, 1)\n prob = torch.cat((1-prob, prob), 1)\n select = torch.FloatTensor(len(prob), 2).zero_().cuda()\n select.scatter_(1, target, 1.)\n\n elif mode=='softmax':\n B,C,H,W = logit.size()\n if class_weight is None:\n class_weight =[1]*C #[1/C]*C\n\n logit = logit.permute(0, 2, 3, 1).contiguous().view(-1, C)\n prob = F.softmax(logit,1)\n select = torch.FloatTensor(len(prob), C).zero_().cuda()\n select.scatter_(1, target, 1.)\n\n class_weight = torch.FloatTensor(class_weight).cuda().view(-1,1)\n class_weight = torch.gather(class_weight, 0, target)\n\n prob = (prob*select).sum(1).view(-1,1)\n prob = torch.clamp(prob,1e-8,1-1e-8)\n\n focus = torch.pow((1-prob), self.gamma)\n #focus = torch.where(focus < 2.0, focus, torch.zeros(prob.size()).cuda())\n focus = torch.clamp(focus,0,self.limit)\n\n\n batch_loss = - class_weight *focus*prob.log()\n\n if self.size_average:\n loss = batch_loss.mean()\n else:\n loss = batch_loss\n\n return loss\n\n\n##------------\n\n\n## http://geek.csdn.net/news/detail/126833\nclass PseudoBCELoss2d(nn.Module):\n def __init__(self):\n super(PseudoBCELoss2d, self).__init__()\n\n def forward(self, logit, truth, is_average=True):\n N = len(truth)\n z = logit.view (N,-1)\n t = truth.view (N,-1)\n dim = t.size(1)\n\n loss = z.clamp(min=0) - z*t + torch.log(1 + torch.exp(-z.abs()))\n loss = loss.sum(1)/dim\n\n if is_average:\n loss = loss.sum()/N #w.sum()\n\n return loss\n\n\nclass LogisticMarginLoss(nn.Module):\n def __init__(self):\n super(LogisticMarginLoss, self).__init__()\n\n def forward(self, logit, truth, is_average=True):\n N = len(truth)\n logit = logit.view(N,-1)\n truth = truth.view(N,-1)\n sign = 2. * truth - 1.\n loss = F.soft_margin_loss(logit, sign, reduce = is_average)\n #loss(x, y) = sum_i (log(1 + exp(-y[i]*x[i]))) / x.nelement()\n return loss\n\n\nclass HingeMarginLoss(nn.Module):\n def __init__(self):\n super(HingeMarginLoss, self).__init__()\n\n def forward(self, logit, truth):\n N,C,H,W = truth.shape\n logit = logit.view(N,-1)\n truth = truth.view(N,-1)\n sign = 2. * truth - 1.\n\n hinge = (1. 
- logit * sign)\n hinge = F.relu(hinge)\n\n loss = hinge.sum()/(N*C*H*W)\n #loss(x, y) = sum_i (log(1 + exp(-y[i]*x[i]))) / x.nelement()\n return loss\n\n\n#https://becominghuman.ai/investigating-focal-and-dice-loss-for-the-kaggle-2018-data-science-bowl-65fb9af4f36c\n#https://www.cs.umanitoba.ca/~ywang/papers/isvc16.pdf\n#https://github.com/ternaus/robot-surgery-segmentation\nclass SoftDiceLoss(nn.Module):\n def __init__(self):\n super(SoftDiceLoss, self).__init__()\n\n\n def forward(self, logit, target, empty_value=0):\n\n prob = F.sigmoid(logit)\n N = target.size(0)\n p = prob.view(N,-1)\n t = target.view(N,-1)\n\n p_sum = p.sum(1)\n t_sum = t.sum(1)\n\n #non-empty\n intersection = (p * t).sum(1)\n union = p_sum + t_sum - intersection + EPS\n dice = 1 - intersection/union\n\n #empty\n empty = torch.zeros(N).fill_(empty_value).cuda()\n\n\n loss = torch.where(t_sum>0, dice, empty)\n loss = loss.sum()/N\n return loss\n\n\n\n\nclass LogDiceLoss(nn.Module):\n def __init__(self):\n super(LogDiceLoss, self).__init__()\n\n\n def forward(self, logit, target, empty_value=0):\n\n prob = F.sigmoid(logit)\n N = target.size(0)\n p = prob.view(N,-1)\n t = target.view(N,-1)\n\n p_sum = p.sum(1)\n t_sum = t.sum(1)\n\n #non-empty\n intersection = (p * t).sum(1)\n union = p_sum + t_sum - intersection + EPS\n dice = intersection/union\n dice = torch.clamp(dice,1e-6,1-1e-6)\n dice = -torch.log(dice)\n\n #empty\n empty = torch.zeros(N).fill_(empty_value).cuda()\n\n\n loss = torch.where(t_sum>0, dice, empty)\n loss = loss.sum()/N\n return loss\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#\n# # https://github.com/bermanmaxim/jaccardSegment/blob/master/losses.py\n# # https://discuss.pytorch.org/t/solved-what-is-the-correct-way-to-implement-custom-loss-function/3568/4\n# class CrossEntropyLoss2d(nn.Module):\n# def __init__(self, weight=None, size_average=True):\n# super(CrossEntropyLoss2d, self).__init__()\n# self.nll_loss = nn.NLLLoss2d(weight, size_average)\n#\n# def forward(self, logits, targets):\n# return self.nll_loss(F.log_softmax(logits), targets)\n#\n# class BCELoss2d(nn.Module):\n# def __init__(self, weight=None, size_average=True):\n# super(BCELoss2d, self).__init__()\n# self.bce_loss = nn.BCELoss(weight, size_average)\n#\n# def forward(self, logits, targets):\n# probs = F.sigmoid(logits)\n# probs_flat = probs.view (-1)\n# targets_flat = targets.view(-1)\n# return self.bce_loss(probs_flat, targets_flat)\n#\n#\n# class StableBCELoss(torch.nn.modules.Module):\n# def __init__(self):\n# super(StableBCELoss, self).__init__()\n# def forward(self, input, target):\n# neg_abs = - input.abs()\n# loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log()\n# return loss.mean()\n\n\n\n\n#\n# ## http://geek.csdn.net/news/detail/126833\n# class WeightedBCELoss2d(nn.Module):\n# def __init__(self):\n# super(WeightedBCELoss2d, self).__init__()\n#\n# def forward(self, logits, labels, weights):\n# w = weights.view(-1)\n# z = logits.view (-1)\n# t = labels.view (-1)\n# loss = w*z.clamp(min=0) - w*z*t + w*torch.log(1 + torch.exp(-z.abs()))\n# loss = loss.sum()/(w.sum()+ 1e-12)\n# return loss\n#\n# class WeightedSoftDiceLoss(nn.Module):\n# def __init__(self):\n# super(WeightedSoftDiceLoss, self).__init__()\n#\n# def forward(self, logits, labels, weights):\n# probs = F.sigmoid(logits)\n# num = labels.size(0)\n# w = (weights).view(num,-1)\n# w2 = w*w\n# m1 = (probs ).view(num,-1)\n# m2 = (labels ).view(num,-1)\n# intersection = (m1 * m2)\n# score = 2. 
* ((w2*intersection).sum(1)+1) / ((w2*m1).sum(1) + (w2*m2).sum(1)+1)\n# score = 1 - score.sum()/num\n# return score\n#\n#\n\n#\n\n#\n#\n# def multi_loss(logits, labels):\n# #l = BCELoss2d()(logits, labels)\n#\n#\n# if 0:\n# l = BCELoss2d()(logits, labels) + SoftDiceLoss()(logits, labels)\n#\n# #compute weights\n# else:\n# batch_size,C,H,W = labels.size()\n# weights = Variable(torch.tensor.torch.ones(labels.size())).cuda()\n#\n# if 1: #use weights\n# kernel_size = 5\n# avg = F.avg_pool2d(labels,kernel_size=kernel_size,padding=kernel_size//2,stride=1)\n# boundary = avg.ge(0.01) * avg.le(0.99)\n# boundary = boundary.float()\n#\n# w0 = weights.sum()\n# weights = weights + boundary*2\n# w1 = weights.sum()\n# weights = weights/w1*w0\n#\n# l = WeightedBCELoss2d()(logits, labels, weights) + \\\n# WeightedSoftDiceLoss()(logits, labels, weights)\n#\n# return l\n#\n#\n# #\n# #\n# #\n# #\n# #\n# #\n# #\n# # class SoftCrossEntroyLoss(nn.Module):\n# # def __init__(self):\n# # super(SoftCrossEntroyLoss, self).__init__()\n# #\n# # def forward(self, logits, soft_labels):\n# # #batch_size, num_classes = logits.size()\n# # # soft_labels = labels.view(-1,num_classes)\n# # # logits = logits.view(-1,num_classes)\n# #\n# # logits = logits - logits.max()\n# # log_sum_exp = torch.log(torch.sum(torch.exp(logits), 1))\n# # loss = - (soft_labels*logits).sum(1) + log_sum_exp\n# # loss = loss.mean()\n# #\n# # return loss\n# #\n# #\n# #\n# # # loss, accuracy -------------------------\n# # def top_accuracy(probs, labels, top_k=(1,)):\n# # \"\"\"Computes the precision@k for the specified values of k\"\"\"\n# #\n# # probs = probs.data\n# # labels = labels.data\n# #\n# # max_k = max(top_k)\n# # batch_size = labels.size(0)\n# #\n# # values, indices = probs.topk(max_k, dim=1, largest=True, sorted=True)\n# # indices = indices.t()\n# # corrects = indices.eq(labels.view(1, -1).expand_as(indices))\n# #\n# # accuracy = []\n# # for k in top_k:\n# # # https://stackoverflow.com/questions/509211/explain-slice-notation\n# # # a[:end] # items from the beginning through end-1\n# # c = corrects[:k].view(-1).float().sum(0, keepdim=True)\n# # accuracy.append(c.mul_(1. 
/ batch_size))\n# # return accuracy\n# #\n# #\n# # ## focal loss ## ---------------------------------------------------\n# # class CrossEntroyLoss(nn.Module):\n# # def __init__(self):\n# # super(CrossEntroyLoss, self).__init__()\n# #\n# # def forward(self, logits, labels):\n# # #batch_size, num_classes = logits.size()\n# # # labels = labels.view(-1,1)\n# # # logits = logits.view(-1,num_classes)\n# #\n# # max_logits = logits.max()\n# # log_sum_exp = torch.log(torch.sum(torch.exp(logits-max_logits), 1))\n# # loss = log_sum_exp - logits.gather(dim=1, index=labels.view(-1,1)).view(-1) + max_logits\n# # loss = loss.mean()\n# #\n# # return loss\n# #\n# # ## https://github.com/unsky/focal-loss\n# # ## https://github.com/sciencefans/Focal-Loss\n# # ## https://www.kaggle.com/c/carvana-image-masking-challenge/discussion/39951\n# #\n# # # https://raberrytv.wordpress.com/2017/07/01/pytorch-kludges-to-ensure-numerical-stability/\n# # # https://github.com/pytorch/pytorch/issues/1620\n# # class FocalLoss(nn.Module):\n# # def __init__(self,gamma = 2, alpha=1.2):\n# # super(FocalLoss, self).__init__()\n# # self.gamma = gamma\n# # self.alpha = alpha\n# #\n# #\n# # def forward(self, logits, labels):\n# # eps = 1e-7\n# #\n# # # loss = - np.power(1 - p, gamma) * np.log(p))\n# # probs = F.softmax(logits)\n# # probs = probs.gather(dim=1, index=labels.view(-1,1)).view(-1)\n# # probs = torch.clamp(probs, min=eps, max=1-eps)\n# #\n# # loss = -torch.pow(1-probs, self.gamma) *torch.log(probs)\n# # loss = loss.mean()*self.alpha\n# #\n# # return loss\n# #\n# #\n# #\n# #\n# # # https://arxiv.org/pdf/1511.05042.pdf\n# # class TalyorCrossEntroyLoss(nn.Module):\n# # def __init__(self):\n# # super(TalyorCrossEntroyLoss, self).__init__()\n# #\n# # def forward(self, logits, labels):\n# # #batch_size, num_classes = logits.size()\n# # # labels = labels.view(-1,1)\n# # # logits = logits.view(-1,num_classes)\n# #\n# # talyor_exp = 1 + logits + logits**2\n# # loss = talyor_exp.gather(dim=1, index=labels.view(-1,1)).view(-1) /talyor_exp.sum(dim=1)\n# # loss = loss.mean()\n# #\n# # return loss\n# #\n# # # check #################################################################\n# # def run_check_focal_loss():\n# # batch_size = 64\n# # num_classes = 15\n# #\n# # logits = np.random.uniform(-2,2,size=(batch_size,num_classes))\n# # labels = np.random.choice(num_classes,size=(batch_size))\n# #\n# # logits = Variable(torch.from_numpy(logits)).cuda()\n# # labels = Variable(torch.from_numpy(labels)).cuda()\n# #\n# # focal_loss = FocalLoss(gamma = 2)\n# # loss = focal_loss(logits, labels)\n# # print (loss)\n# #\n# #\n# # def run_check_soft_cross_entropy_loss():\n# # batch_size = 64\n# # num_classes = 15\n# #\n# # logits = np.random.uniform(-2,2,size=(batch_size,num_classes))\n# # soft_labels = np.random.uniform(-2,2,size=(batch_size,num_classes))\n# #\n# # logits = Variable(torch.from_numpy(logits)).cuda()\n# # soft_labels = Variable(torch.from_numpy(soft_labels)).cuda()\n# # soft_labels = F.softmax(soft_labels,1)\n# #\n# # soft_cross_entropy_loss = SoftCrossEntroyLoss()\n# # loss = soft_cross_entropy_loss(logits, soft_labels)\n# # print (loss)\n#\n# main #################################################################\nif __name__ == '__main__':\n print( '%s: calling main function ... 
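The commented-out StableBCELoss above hand-rolls the log-sum-exp trick; current PyTorch ships the same numerically stable computation as a built-in, which this sketch verifies on random data:

import torch
import torch.nn.functional as F

x = torch.randn(4, 3)
t = torch.rand(4, 3)
by_hand = x.clamp(min=0) - x * t + (1 + (-x.abs()).exp()).log()
builtin = F.binary_cross_entropy_with_logits(x, t, reduction='none')
print(torch.allclose(by_hand, builtin))   # True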
' % os.path.basename(__file__))\n\n\n\n print('\\nsucess!')","sub_path":"net/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":14486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"287319002","text":"#!/usr/bin/env python3\n\"\"\"\nThe WagMan publisher is responsible for distributing output of the Wagman\nserialline to subscribers. Subscribers may need to use a session ID.\n\"\"\"\nfrom serial import Serial\nimport zmq\nimport time\nimport sys\nimport re\nimport logging\nfrom multiprocessing import Process\n\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s - %(levelname)s - %(message)s')\n\nheader_prefix = '<<<-'\nfooter_prefix = '->>>'\n\n\ndef publisher(serial):\n context = zmq.Context()\n\n socket = context.socket(zmq.PUB)\n socket.setsockopt(zmq.SNDTIMEO, 15000)\n socket.bind('ipc:///tmp/zeromq_wagman-pub')\n\n try:\n output = []\n incommand = False\n commandname = None\n session_id = ''\n\n while True:\n line = serial.readline().decode().strip()\n print(line)\n\n if incommand:\n if line.startswith(footer_prefix):\n incommand = False\n\n if session_id:\n header = '{} cmd.{}'.format(session_id, commandname)\n else:\n header = 'cmd.{}'.format(commandname)\n\n body = '\\n'.join(output)\n\n logging.debug(\"sending header: {}\".format(header))\n logging.debug(\"sending body: {}\".format(body))\n\n msg = '{}\\n{}'.format(header, body)\n\n socket.send_string(msg)\n output = []\n else:\n output.append(line)\n elif line.startswith(header_prefix):\n session_id = ''\n logging.debug('received header: {}'.format(line))\n matchObj = re.match(r'.*sid=(\\S+)', line, re.M | re.I)\n if matchObj:\n session_id = matchObj.group(1).rstrip()\n\n if session_id:\n logging.debug(\"detected session_id: {}\".format(session_id))\n else:\n logging.debug(\"no session_id detected\")\n\n fields = line.split()\n logging.debug(fields)\n\n commandname = fields[-1]\n\n incommand = True\n elif line.startswith('log:'):\n socket.send_string(line)\n finally:\n socket.send_string('error: not connected to wagman')\n\n\ndef server(serial):\n context = zmq.Context()\n\n server_socket = context.socket(zmq.REP)\n server_socket.setsockopt(zmq.SNDTIMEO, 15000)\n server_socket.bind('ipc:///tmp/zeromq_wagman-server')\n\n while True:\n try:\n serial.write(server_socket.recv() + b'\\n')\n except Exception as e:\n server_socket.send_string('ERROR')\n break\n else:\n server_socket.send_string('OK')\n\n\nif __name__ == '__main__':\n try:\n wagman_device = sys.argv[1]\n except IndexError:\n wagman_device = '/dev/waggle_sysmon'\n\n while True:\n try:\n with Serial(wagman_device, 57600, timeout=10, writeTimeout=10) as serial:\n processes = [\n Process(target=publisher, args=(serial,)),\n Process(target=server, args=(serial,)),\n ]\n\n for p in processes:\n p.start()\n\n while all(p.is_alive() for p in processes):\n time.sleep(1)\n\n for p in processes:\n p.terminate()\n except OSError:\n print('could not connect to device')\n time.sleep(3)\n","sub_path":"wagman/wagman-driver.py","file_name":"wagman-driver.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"56267367","text":"import cv2\nfrom PIL import Image\nimport threading\nfrom http.server import BaseHTTPRequestHandler,HTTPServer\nfrom socketserver import ThreadingMixIn\nimport io\nimport time\n\n\nclass CamHandler(BaseHTTPRequestHandler):\n\tdef do_GET(self):\n\t\tif 
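wagman-driver.py publishes on an ipc endpoint; a minimal matching subscriber (a sketch — assumes pyzmq is installed and the driver is running so the socket exists):

import zmq

context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect('ipc:///tmp/zeromq_wagman-pub')
socket.setsockopt_string(zmq.SUBSCRIBE, '')   # no topic filter
while True:
    print(socket.recv_string())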
self.path.endswith('.mjpg'):\n\t\t\tself.send_response(200)\n\t\t\tself.send_header('Content-type','multipart/x-mixed-replace; boundary=--jpgboundary')\n\t\t\tself.end_headers()\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\trc,img = capture.read()\n\t\t\t\t\tif not rc:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\timgRGB=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n\t\t\t\t#\tcv2.imwrite('tmpFile.jpg', imgRGB)\n\t\t\t\t\tjpg = Image.fromarray(imgRGB)\n\t\t\t\t\ttmpFile = io.BytesIO()\n\t\t\t\t\tjpg.save(tmpFile, 'JPEG')\n\t\t\t\t\tself.wfile.write(u\"--jpgboundary\".encode('utf-8'))\n\t\t\t\t\tself.send_header('Content-type','image/jpeg')\n\t\t\t\t\tself.send_header('Content-length', len(tmpFile.getvalue()))\n\t\t\t\t\tself.end_headers()\n\t\t\t\t\tself.wfile.write(tmpFile.getvalue())\n\t\t\t\t\ttime.sleep(0.1)\n\t\t\t\texcept KeyboardInterrupt:\n\t\t\t\t\tbreak\n\t\t\treturn\n\t\tif self.path.endswith('.html'):\n\t\t\tself.send_response(200)\n\t\t\tself.send_header('Content-type','text/html')\n\t\t\tself.end_headers()\n\t\t\t# minimal page embedding the .mjpg stream (markup reconstructed; the tags were stripped from this copy)\n\t\t\tself.wfile.write(u'<html><head></head><body>'.encode('utf-8'))\n\t\t\tself.wfile.write(u'<img src=\"cam.mjpg\"/>'.encode('utf-8'))\n\t\t\tself.wfile.write(u'</body></html>'.encode('utf-8'))\n\t\t\treturn\n\n\n\ndef main():\n\tglobal capture\n\tcapture = cv2.VideoCapture(0)\n\tglobal img\n\ttry:\n\t\tserver = HTTPServer(('localhost', 8080), CamHandler)\n\t\tprint (\"server started\")\n\t\tserver.serve_forever()\n\texcept KeyboardInterrupt:\n\t\tcapture.release()\n\t\tserver.socket.close()\n\nif __name__ == '__main__':\n\tmain()\n\n\n","sub_path":"stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"259960321","text":"class TestCreateReplyPresenterImplementation:\n\n    def test_raise_exception_for_invalid_comment_id(self):\n        from fb_post_clean_arch_v2.presenters.create_reply_presenter_implementation import \\\n            CreateReplyPresenterImplementation\n        presenter = CreateReplyPresenterImplementation()\n        from fb_post_clean_arch_v2.constants.exception_messages import \\\n            INVALID_COMMENT_ID\n        expected_response = INVALID_COMMENT_ID[0]\n        response_status_code = INVALID_COMMENT_ID[1]\n\n        response_object = presenter.raise_exception_for_invalid_comment_id()\n        import json\n        response = json.loads(response_object.content)\n        assert response['http_status_code'] == 400\n        assert response['res_status'] == response_status_code\n        assert response['response'] == expected_response\n\n    def test_get_response_given_comment_id_return_comment_id_dict(self):\n        from fb_post_clean_arch_v2.presenters.create_reply_presenter_implementation import \\\n            CreateReplyPresenterImplementation\n        presenter = CreateReplyPresenterImplementation()\n        comment_id = 1\n        expected_response = {\n            \"comment_id\": comment_id\n        }\n        response_object = presenter.get_response(reply_id=comment_id)\n\n        import json\n        actual_response = json.loads(response_object.content)\n\n        assert actual_response == expected_response\n","sub_path":"fb_post_clean_arch_v2/tests/presenters/test_create_reply_presenter_implementation.py","file_name":"test_create_reply_presenter_implementation.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"253707350","text":"import datetime\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.db import IntegrityError\n\nfrom core.models import Conversion\nfrom utils.telephony import get_token, get_calls\n\n\nclass 
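stream.py imports ThreadingMixIn but never uses it; the conventional way to let several clients pull the stream concurrently is a threaded server class (a sketch under that assumption — the single shared capture would still need locking):

from http.server import HTTPServer
from socketserver import ThreadingMixIn

class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """Handle each request in its own thread."""

# in main(): server = ThreadedHTTPServer(('localhost', 8080), CamHandler)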
Command(BaseCommand):\n help = 'Импортируем звонки из Яндекс.Телефонии'\n\n def handle(self, *args, **options):\n try:\n last_synced_call = Conversion.objects.filter(type=Conversion.PHONE_CALL).latest('datetime')\n except Conversion.DoesNotExist:\n start_utc = datetime.datetime(2017, 8, 1)\n else:\n start_utc = last_synced_call.datetime\n\n token = get_token(None)\n\n response = get_calls(\n token,\n startUtc=start_utc,\n pageSize=1000,\n callFilter=['Connected', 'Incoming'],\n )\n calls = response.json()['data']['calls']\n\n for call in calls:\n duration = str(datetime.timedelta(milliseconds=int(call['duration'])))\n description = 'Длительность: {}'.format(duration)\n date = datetime.datetime.strptime(call['dateTimeUtc'], '%Y-%m-%dT%H:%M:%S.%fZ')\n\n try:\n conversion, created = Conversion.objects.get_or_create(\n type=Conversion.PHONE_CALL,\n datetime=date,\n sender=call['from'],\n description=description,\n defaults={\n 'value': 0,\n },\n )\n except IntegrityError:\n pass\n","sub_path":"mebel/core/management/commands/import_calls.py","file_name":"import_calls.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"187855309","text":"import torch\nimport torch.nn.functional as F\nimport numpy as np\nimport torch.nn as nn\n\nimport time\n\ndef evaluate(model, db, opt):\n \"\"\"\n Args:\n model (torch.nn.module): the model to be evaluated in the current stage\n db (torch.utils.data.Dataset): prepared torch dataset object\n opt: command line input from the user\n \"\"\"\n model.eval()\n with torch.no_grad():\n # set the model in the evaluation mode\n eval_loss = 0\n eval_acc = 0\n loader = torch.utils.data.DataLoader(db['eval'], batch_size = opt.eval_batch_size, shuffle=False, num_workers = 4)\n num_eval = len(db['eval'])\n for batch_idx, batch in enumerate(loader):\n data = batch['image']\n target = batch['label']\n if opt.cuda:\n with torch.no_grad():\n data, target = data.cuda(), target.cuda()\n outputs = model(data)\n _, preds = torch.max(outputs, 1)\n\n eval_loss += F.cross_entropy(outputs, target).data.item()\n\n eval_acc += (preds == target).sum().data.item()\n eval_loss /= num_eval\n eval_acc /= num_eval\n\n print('\\nTest set: Average loss: {:.6f}. 
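The call-import command above performs two small conversions on each record — an ISO-with-Z timestamp and a millisecond duration; both in isolation (plain stdlib, sample values made up):

import datetime

raw = {'dateTimeUtc': '2017-08-01T12:34:56.789Z', 'duration': '61500'}
date = datetime.datetime.strptime(raw['dateTimeUtc'], '%Y-%m-%dT%H:%M:%S.%fZ')
duration = str(datetime.timedelta(milliseconds=int(raw['duration'])))
print(date, duration)   # 2017-08-01 12:34:56.789000 0:01:01.500000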
Average accuracy {:.6f}'.format(\n eval_loss, eval_acc))\n\ndef train(model, optim, sche, db, opt):\n \"\"\"\n Args:\n model (torch.nn.module): the model to be trained\n optim (torch.optim.X): torch optimizer to be used\n db (torch.utils.data.Dataset): prepared torch dataset object\n opt: command line input from the user\n \"\"\"\n for epoch in range(1, opt.epochs + 1):\n sche.step()\n model.train()\n criterion = nn.CrossEntropyLoss()\n\n train_loader = torch.utils.data.DataLoader(db['train'], batch_size = opt.batch_size, shuffle = True)\n for batch_idx, batch in enumerate(train_loader):\n data = batch['image']\n target = batch['label']\n if opt.cuda:\n with torch.no_grad():\n data, target = data.cuda(), target.cuda()\n # erase all computed gradient\n optim.zero_grad()\n outputs = model(data)\n _, preds = torch.max(outputs, 1)\n preds = preds.clamp(min=1e-6,max=1) # resolve some numerical issue\n\n loss = F.cross_entropy(outputs, target)\n # compute gradient\n loss.backward()\n #print(\"Model's state_dict:\")\n #if loss.data.item() != 0:\n # print(model.alex.features[0].weight.data)\n #for param_tensor in model.state_dict():\n # print(param_tensor, \"\\t\", model.state_dict()[param_tensor].size())\n # update parameters in the neural decision forest\n #print(prediction.data)\n optim.step()\n\n if batch_idx % opt.report_every == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f} '.format(\n epoch, batch_idx * opt.batch_size, len(db['train']),\n 100. * batch_idx / len(train_loader), loss.data.item()))\n # evaluate model if specified\n if opt.eval and batch_idx!= 0 and batch_idx % opt.eval_every == 0:\n evaluate(model, db, opt)\n model.train()\n","sub_path":"src/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"117948914","text":"#!/usr/bin/env python\n# vim: set fileencoding=utf-8 :\n\nimport bob.db.base\n\n\nclass BioFile(bob.db.base.File):\n \"\"\"A simple base class that defines basic properties of File object for the use in verification experiments\n\n Parameters\n ----------\n\n client_id : object\n The id of the client this file belongs to.\n Its type depends on your implementation.\n If you use an SQL database, this should be an SQL type like Integer or String.\n\n file_id : object\n see :py:class:`bob.db.base.File` constructor\n\n path : object\n see :py:class:`bob.db.base.File` constructor\n \"\"\"\n\n def __init__(self, client_id, path, file_id=None, **kwargs):\n super(BioFile, self).__init__(path, file_id, **kwargs)\n\n # just copy the information\n self.client_id = client_id\n \"\"\"The id of the client, to which this file belongs to.\"\"\"\n\n\nclass BioFileSet(BioFile):\n \"\"\"This class defines the minimum interface of a set of database files that needs to be exported.\n Use this class, whenever the database provides several files that belong to the same probe.\n Each file set has an id, and a list of associated files, which are of\n type :py:class:`bob.bio.base.database.BioFile` of the same client.\n The file set id can be anything hashable, but needs to be unique all over the database.\n\n Parameters\n ----------\n\n file_set_id : str or int\n A unique ID that identifies the file set.\n\n files : [:py:class:`bob.bio.base.database.BioFile`]\n A non-empty list of BioFile objects that should be stored inside this file.\n All files of that list need to have the same client ID.\n \"\"\"\n\n def __init__(self, file_set_id, files, path=None, 
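trainer.py calls sche.step() at the top of each epoch, which was the pre-1.1 PyTorch convention; current releases document the opposite order (scheduler after the optimizer). A runnable toy loop showing the recommended placement:

import torch

model = torch.nn.Linear(4, 1)
optim = torch.optim.SGD(model.parameters(), lr=0.1)
sche = torch.optim.lr_scheduler.StepLR(optim, step_size=1, gamma=0.5)
for epoch in range(3):
    for _ in range(5):                    # stand-in for the batch loop
        optim.zero_grad()
        loss = model(torch.randn(8, 4)).pow(2).mean()
        loss.backward()
        optim.step()
    sche.step()                           # once per epoch, after optim.step()
    print(epoch, optim.param_groups[0]['lr'])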
**kwargs):\n # don't accept empty file lists\n assert len(files), \"Cannot create an empty BioFileSet\"\n\n # call base class constructor\n super(BioFileSet, self).__init__(\n files[0].client_id,\n \"+\".join(f.path for f in files) if path is None else path,\n file_set_id, **kwargs)\n\n # check that all files come from the same client\n assert all(f.client_id == self.client_id for f in files)\n\n # The list of files contained in this set\n self.files = files\n\n def __lt__(self, other):\n \"\"\"Defines an order between file sets by using the order of the file set ids.\"\"\"\n # compare two BioFile set objects by comparing their IDs\n return self.id < other.id\n","sub_path":"bob/bio/base/database/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"641010187","text":"import os\nimport shutil\nimport subprocess\n\nfrom ..logger import get_logger\nfrom .. import error\n\n\nclass PanIndiServer(object):\n\n \"\"\" A module to start an INDI server\n\n Args:\n drivers(dict): Dict of valid drivers for indiserver to start, defaults to\n {'PAN_CCD_SIMULATOR': 'indi_simulator_ccd'}\n fifo(str): Path to FIFO file of running indiserver\n \"\"\"\n\n def __init__(self, drivers=[], **kwargs):\n\n # self.logger = get_logger(self)\n self.logger = get_logger(self)\n self._indiserver = shutil.which('indiserver')\n\n assert self._indiserver is not None, error.PanError(\"Cannot find indiserver command\")\n\n self.drivers = drivers\n self._proc = None\n\n try:\n # Start the server\n self.start()\n except Exception as e:\n self.logger.warning(\"Problem with staring the INDI server: {}\".format(e))\n\n self._connected = False\n self.logger.debug(\"PanIndiServer created. PID: {}\".format(self._proc.pid))\n\n\n##################################################################################################\n# Properties\n##################################################################################################\n\n @property\n def is_connected(self):\n \"\"\" INDI Server connection\n\n Tests whether running PID exists\n \"\"\"\n return os.getpgid(self._proc.pid)\n\n##################################################################################################\n# Methods\n##################################################################################################\n\n def start(self, *args, **kwargs):\n \"\"\" Start an INDI server.\n\n Host, port, and drivers must be configured in advance.\n\n Returns:\n _proc(process): Returns process from `subprocess.Popen`\n \"\"\"\n\n cmd = [self._indiserver]\n\n opts = args if args else ['-m', '100']\n cmd.extend(opts)\n cmd.extend(self.drivers)\n\n try:\n self.logger.debug(\"Starting INDI Server: {}\".format(cmd))\n self._proc = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)\n self.logger.debug(\"INDI server started. 
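BioFileSet enforces two invariants — a non-empty file list and a single client per set — and derives a default path by joining member paths with '+'. The same pattern stripped of the bob machinery (illustrative only, using a stand-in record type):

from collections import namedtuple

Rec = namedtuple('Rec', 'client_id path')

class FileSet:
    def __init__(self, set_id, files):
        assert len(files), "Cannot create an empty set"
        assert all(f.client_id == files[0].client_id for f in files)
        self.id = set_id
        self.client_id = files[0].client_id
        self.path = "+".join(f.path for f in files)

print(FileSet(7, [Rec(1, 'a/01'), Rec(1, 'a/02')]).path)   # a/01+a/02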
PID: {}\".format(self._proc.pid))\n except Exception as e:\n self.logger.warning(\"Cannot start indiserver: {}\".format(e))\n\n def stop(self):\n \"\"\" Stops the INDI server \"\"\"\n if self._proc:\n if os.getpgid(self._proc.pid):\n self.logger.debug(\"Shutting down INDI server (PID {})\".format(self._proc.pid))\n\n try:\n outs, errs = self._proc.communicate(timeout=3)\n except subprocess.TimeoutExpired:\n self._proc.kill()\n outs, errs = self._proc.communicate()\n\n self.logger.debug(\"Output from INDI server: {}\".format(outs))","sub_path":"pocs/utils/indi/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"302216776","text":"# -*- coding: utf-8 -*-\n\n\nfrom plugins.ipmi import supermicro_ipmi\nfrom plugins.switches import dell_switches\n\n\nclass IPMI(supermicro_ipmi.SuperMicroIPMI):\n\n def _read_config(self, *args, **kwargs):\n return \"engineer\"\n\n\nclass DellSwitch(dell_switches.DellExtension):\n\n def __init__(self, *args, **kwargs):\n super(DellSwitch, self).__init__(False)\n self.password = \"Fa#iR36cHE0\"\n\n def _read_config(self):\n vlans = [10, 11, 12, 13, 14]\n switches = {\n \"sw-de-1\": {\"ip\": \"172.16.42.10\", \"vendor\": \"dell\", \"vlans\": {}},\n \"sw-de-3\": {\"ip\": \"172.16.42.12\", \"vendor\": \"dell\", \"vlans\": {}},\n \"sw-de-5\": {\"ip\": \"172.16.42.15\", \"vendor\": \"dell\", \"vlans\": {}},\n \"sw-de-7\": {\"ip\": \"172.16.42.33\", \"vendor\": \"dell\", \"vlans\": {}},\n \"sw-de-9\": {\"ip\": \"172.16.42.37\", \"vendor\": \"dell\", \"vlans\": {}},\n \"sw-de-11\": {\"ip\": \"172.16.42.43\", \"vendor\": \"dell\", \"vlans\": {}},\n \"sw-de-13\": {\"ip\": \"172.16.42.45\", \"vendor\": \"dell\", \"vlans\": {}}}\n\n return switches, vlans\n","sub_path":"deploy/deploy_cluster/unplugin.py","file_name":"unplugin.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"618966608","text":"class Solution:\n def titleToNumber(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n _sum, n = 0, len(s)\n for i in range(n):\n _sum = _sum * 26 + (ord(s[i]) - 64)\n n -= 1\n return _sum\n\n\nif __name__ == '__main__':\n print(Solution().titleToNumber('AB'))","sub_path":"leetcode_171/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"652982937","text":"import pandas as pd\nfrom pathlib import Path\nimport sys\n\ndef normalize_height(g):\n values = g.values.reshape(-1, 20, 3)\n minVals, maxVals = values[0:1].min(axis=(0, 1)), values[0:1].max(axis=(0, 1))\n\n scale = 1.0 / (maxVals[1] - minVals[1])\n new_values = (values - minVals) * scale + minVals\n new_values = new_values.reshape(-1, 60)\n return pd.DataFrame(new_values, index=g.index, columns=g.columns)\n\ndef translate_to_origin(g):\n values = g.values.reshape(-1, 20, 3)\n centroid_pose = values.sum(axis=0, keepdims=True) / values.shape[0]\n centroid_point = centroid_pose.sum(axis=1, keepdims=True) / values.shape[1]\n new_values = values - centroid_point\n new_values = new_values.reshape(-1, 60)\n\n return pd.DataFrame(new_values, index=g.index, columns=g.columns)\n\ndef invert_ys(g):\n values = g.values.reshape(-1, 20, 3)\n values[:, 1] = -values[:, 1]\n new_values = values.reshape(-1, 60)\n\n return pd.DataFrame(new_values, index=g.index, columns=g.columns)\n\n\nif 
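titleToNumber above is base-26 with A=1 (its n -= 1, incidentally, is dead code); the inverse direction makes a handy round-trip check:

def number_to_title(num):
    title = ''
    while num > 0:
        num, rem = divmod(num - 1, 26)   # shift to 0-based before dividing
        title = chr(rem + 65) + title
    return title

print(number_to_title(28))   # 'AB', matching titleToNumber('AB') == 28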
__name__ == \"__main__\":\n for f in Path(sys.argv[1]).iterdir():\n if f.suffix == '.csv':\n frame = pd.read_csv(f, index_col=['GestureType', 'Timestamp', 'NumJoints', 'ParticipantID', 'TrialID'])\n (frame.groupby(['GestureType', 'ParticipantID', 'TrialID'])\n .apply(lambda g: (g.pipe(normalize_height)\n .pipe(translate_to_origin)\n # .pipe(invert_ys)\n .to_csv(f)))\n )\n","sub_path":"process_kinect_datasets/KinderGator/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"649255205","text":"from rest_framework.views import APIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework_tracking.mixins import LoggingMixin\nfrom Tickets.models import ReviewSystem\nfrom datetime import datetime\nfrom generic_services.responses import customized_data_response, exception_response\nfrom generic_services.decorators.usertype import admin_user_check\n\t\n\nclass AllUsers(LoggingMixin,APIView):\n\t\"\"\"\n\tLiked and Disliked User GET API\n\n\t\tAuthorization Required\t\t: Yes\n\t\tService Usage & Description\t: This Api is used to users who liked & disliked park.\n\t\"\"\"\n\tpermission_classes = (IsAuthenticated,)\n\t@admin_user_check\n\tdef get(self, request, format=None):\n\t\ttry:\n\t\t\tdata = request.data\n\t\t\trecord = ReviewSystem.objects.all()\n\t\t\tresult = []\n\t\t\tif record.count()==0:\n\t\t\t\tresult=[]\n\t\t\telse:\n\t\t\t\ttotal_likes = record.filter(is_like=1).count()\n\t\t\t\ttotal_dislikes = record.filter(is_like=0).count()\n\t\t\t\tfor i in record:\n\t\t\t\t\tdata_dict = {}\n\t\t\t\t\tdata_dict[\"name\"] = i.user_name\n\t\t\t\t\tdata_dict[\"mobile\"] = i.user_mobile\n\t\t\t\t\tdata_dict[\"date_of_booking\"] = i.date_of_booking.strftime('%d-%m-%Y %I:%M %p')\n\t\t\t\t\tdata_dict[\"liked\"] = i.is_like\n\n\t\t\t\t\tresult.append(data_dict)\n\t\t\treturn customized_data_response(result,total_likes,total_dislikes)\n\t\texcept Exception as e:\n\t\t\treturn exception_response(str(e))","sub_path":"Tickets/api/admin/review/allUsers.py","file_name":"allUsers.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"356139124","text":"import numpy as np\nimport pickle\nimport argparse\nimport mutator_classes\nimport os\nimport stationary_distribution_aug as sd\nimport gzip\nfrom collections import defaultdict as ddict\n\ndef main():\n # a set of default params to use\n\n default_params = {'N': 2000, # population size\n 'M': 1000, # number of modifier loci, M\n 'h': 0.5, # h\n 's': 0.01, # s - together hs are the average fitness effects of mutations at selected loci\n 'phi': 1E-12, # effect size of mutator alleles\n 'mutator_mutation_rate': 1.25E-7, # Mutation rate at modifier sites\n 'mutation_rate': 1.25E-7, # baseline mutation rate at selected sites, u0\n 'loci': 3E8 * 0.08, # number of selected loci\n 'constant': True, # is the population size constant\n 'split_gen': 0,\n # the generation at which the ancestral population is split into europeans and africans\n 'backup_gen': 100, # backup the population every 100 generations\n 'ignore_gen': 70, # stop simulations this many generations from the present\n 'total_gen': 10000, # how many total generations to simulate\n 'outpath': 'blah3', # where do we store results\n 'NE_path': '/Users/will_milligan/PycharmProjects/MUTATOR_FINAL/MSMC_NE_dict.pickle', # where do we get population size 
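normalize_height above rescales every frame of a gesture by the height of its first pose; the core broadcasting step on a dummy (frames, joints, 3) array (numpy only):

import numpy as np

values = np.random.rand(5, 20, 3)
minVals = values[0:1].min(axis=(0, 1))    # per-axis minima of frame 0
maxVals = values[0:1].max(axis=(0, 1))
scale = 1.0 / (maxVals[1] - minVals[1])   # 1 / height of the first pose
normalized = (values - minVals) * scale + minVals
print(normalized.shape)                   # (5, 20, 3)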
estimates\n 'invariable_mutator_mutation_rate': True,\n 'variable_mutator_effect': False,\n 'sampling_interval': 1}\n\n print(default_params)\n # # get and parse input string\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--N\", help=\"population_size\", type=int, default=default_params['N'])\n parser.add_argument(\"--M\", help=\"number of modifier loci, M\", type=int, default=default_params['M'])\n parser.add_argument(\"--h\", help=\"h\", type=float, default=default_params['h'])\n parser.add_argument(\"--s\", help=\"s\", type=float, default=default_params['s'])\n parser.add_argument(\"--phi\", help=\"Mutator effect size\", type=float, default=default_params['phi'])\n parser.add_argument(\"--mutator_mutation_rate\", help=\"Mutation rate at modifier sites\", type=float, default=default_params['mutator_mutation_rate'])\n parser.add_argument(\"--mutation_rate\", help=\"baseline mutation rate at selected sites, u0\", type=float, default=default_params['mutation_rate'])\n parser.add_argument(\"--loci\", help=\"number of selected loci\", type=float, default=default_params['loci'])\n parser.add_argument(\"--constant\", help=\"Is pop. size constant?\", type=bool, default=default_params['constant'])\n parser.add_argument(\"--invariable_mutator_mutation_rate\", help=\"Is the mutator mutation rate invariable?\", type=bool,\n default=default_params['invariable_mutator_mutation_rate'])\n parser.add_argument(\"--split_gen\", help=\"What generation do we split at, None if not split\", type=int, default=default_params['split_gen'])\n parser.add_argument(\"--total_gen\", help=\"Total num. of generations to simulate\", type=int, default=default_params['total_gen'])\n parser.add_argument(\"--backup_gen\", help=\"How many generations between backing up populations \", type=int, default=default_params['backup_gen'])\n parser.add_argument(\"--ignore_gen\", help=\"Stop simulations at this generations\", type=int, default=default_params['ignore_gen'])\n parser.add_argument(\"--outpath\", help=\"Where to store populations, should be directory (i.e., end in /)\", type=str, default=default_params['outpath'])\n parser.add_argument(\"--NE_path\", help=\"Where are pop. 
sizes stored\", type=str, default=default_params['NE_path'])\n parser.add_argument(\"--variable_mutator_effect\", help=\"False is mutator effect size is constant\", type=bool, default=default_params['variable_mutator_effect'])\n parser.add_argument(\"--store_trajectories\", help=\"Should we consolidate and store all mutator trajectories\", type=bool, default=False)\n parser.add_argument(\"--sampling_interval\", help=\"How often to sample mutator frequencies in units of N \",\n type=float, default=default_params['sampling_interval'])\n\n args = parser.parse_args()\n print(args)\n # check that results directory exists\n assert os.path.exists(args.outpath)\n\n for pop_dir in os.listdir(args.outpath):\n print(pop_dir)\n process_population_results(args = args, pop_dir = pop_dir)\n\n\ndef process_population_results(args,pop_dir):\n\n # determine if this is a population that predates or postdates a population split\n if pop_dir == 'ancestral':\n split = False\n else:\n split = True\n\n # directory where results are stored for this population\n population_directory = os.path.join(args.outpath, pop_dir)\n\n # load a copy of the population\n # useful for getting some specific parameters defined after initialization (e.g., distribution of fitness effects)\n population = load_final_population(population_directory)\n\n # load a matrix representing the mutator allele frequency at each\n mutator_counts = load_mutator_counts(args = args, population = population,split=split)\n\n # sample the mutator frequencies\n mutator_frequencies = mutator_counts[::int(args.sampling_interval*args.N), :]\n\n\n if args.variable_mutator_effect:\n\n bin_windows = np.append(np.logspace(-2,3,11)[1:-1]/(4*args.N*args.h*args.s*args.loci),np.inf)\n\n binned_results,bin_indexes = bin_results(population,mutator_frequencies, bin_windows)\n\n for bin_n,binned_frequencies in binned_results.items():\n\n do_stuff(mutator_frequencies=binned_frequencies,\n population=population,\n args=args,\n population_directory = population_directory,\n append = f'_bin{bin_n}',\n phi_values = bin_indexes[bin_n])\n\n else:\n\n do_stuff(mutator_frequencies=mutator_frequencies,\n population=population,\n args=args,\n population_directory = population_directory)\n\n if args.store_trajectories:\n write_out_mutator_trajectories(args = args, population = population, population_directory = population_directory, mutator_counts = mutator_counts)\n\n# bin modifier sites based on effect size\ndef bin_results(population,mutator_frequencies, bin_windows):\n\n bin_indexes = ddict(list)\n for phi_index,phi in enumerate(population.mutator_effects):\n bin_n = np.where(bin_windows>phi)[0][0]\n bin_indexes[bin_n].append(phi_index)\n\n binned_results = {}\n for bin_n, indices in bin_indexes.items():\n binned_results[bin_n] = mutator_frequencies[:,indices]\n\n return binned_results,bin_indexes\n\n# calculate relevant quantities\ndef do_stuff(mutator_frequencies,population,args,population_directory,append = '',phi_values=None):\n\n # calculate mutator freq. 
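The type=bool arguments above are a classic argparse pitfall: bool('False') is True, so passing any non-empty string enables the flag. A common workaround (sketch):

import argparse

def str2bool(v):
    if v.lower() in ('yes', 'true', 't', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')

parser = argparse.ArgumentParser()
parser.add_argument("--constant", type=str2bool, default=True)
print(parser.parse_args(['--constant', 'False']).constant)   # False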
moments\n mean = np.nanmean(mutator_frequencies)\n variance = np.nanvar(mutator_frequencies)\n within = np.nanmean(mutator_frequencies*(1-mutator_frequencies))\n\n # calculate stationary distributions\n if type(phi_values) == type(None):\n sD = calculate_stationary_distributions(population = population,args = args)\n else:\n sD = {}\n for phi in phi_values:\n sD[phi] = calculate_stationary_distributions(population=population, args=args, phi = population.mutator_effects[phi])\n\n write_out(args=args,\n population=population,\n mutator_frequencies = mutator_frequencies,\n mean = mean,\n variance = variance,\n within = within,\n sD = sD,\n phi_values = phi_values,\n population_directory = population_directory,\n append = append)\n\ndef write_out(args,\n population,\n mutator_frequencies,\n mean,\n variance,\n within,\n sD,\n phi_values,\n population_directory,\n append=''):\n if mean >= 10*np.mean([sum([p*q for p,q in s.items()]) for s in sD.values()]):\n print(mean,population_directory,append)\n\n with open(os.path.join(population_directory,f'summarized_results{append}.pickle'),'wb+') as fout:\n pickle.dump((args,population,mutator_frequencies,mean,variance,within,sD,phi_values),fout)\n\ndef write_out_mutator_trajectories(args, population, population_directory, mutator_counts):\n\n mutator_count_storage_outpath = os.path.join(population_directory,'mutator_count_storage/')\n os.makedirs(mutator_count_storage_outpath, exist_ok=True)\n\n for i in args.M:\n with gzip.open(os.path.join(mutator_count_storage_outpath,f'trajectory_{i}.gz'),'wb+') as fout:\n pickle.dump(mutator_counts[:,i],fout)\n\ndef calculate_stationary_distributions(population, args, phi = np.nan):\n if np.isnan(phi):\n phi = args.phi\n\n return sd.get_SD(p=population, phi=phi)\n\n# load and return final population\ndef load_final_population(population_directory):\n\n with open(os.path.join(population_directory,'final_population.pickle'), 'rb') as fin:\n final_population = pickle.load(fin)\n return final_population\n\ndef load_mutator_counts(args, population,split):\n\n # determine what size the matrix should be and how many generations were simulated\n if split:\n total_gen = args.split_gen\n simulated_gen = args.split_gen - args.ignore_gen\n else:\n total_gen = args.total_gen - args.split_gen\n simulated_gen = total_gen - args.split_gen\n\n # make a blank matrix\n mutator_counts = np.zeros([total_gen,args.M],dtype=float)\n\n # load realized mutator allele frequencies\n with gzip.open(population.mutator_counts_outpath,'rb') as fin:\n for i in range(simulated_gen):\n mutator_counts[i,:] = pickle.load(fin)\n\n # set ignored generations to nan\n if split:\n mutator_counts[simulated_gen:,:] = np.nan\n\n return mutator_counts\n\nif __name__ == '__main__':\n main()\n","sub_path":"summarize_simulations_variablePhi.py","file_name":"summarize_simulations_variablePhi.py","file_ext":"py","file_size_in_byte":10279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"441440055","text":"from config import *\nimport pymysql\nimport redis\n\nclass MysqlClient(object):\n def __init__(self, host=MYSQL_HOST, port=MYSQL_PORT, user=MYSQL_USER, passwd=MYSQL_PASSWD, db=MYSQL_DB,charset=MYSQL_CHARSET):\n self.client = pymysql.connect(host=host, port=port, user=user, passwd=passwd, db=db, charset=charset)\n self.cursor = self.client.cursor()\n\n def save(self, sql):\n try:\n self.cursor.execute(sql)\n self.client.commit()\n except:\n self.client.rollback()\n\n def find_all(self, sql):\n try:\n 
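bin_results above scans each effect size against the bin edges with np.where; np.digitize performs the same lookup vectorized (a sketch on made-up edges, not a drop-in replacement):

import numpy as np

bin_windows = np.array([0.1, 1.0, 10.0, np.inf])
effects = np.array([0.05, 3.0, 0.5, 42.0])
print(np.digitize(effects, bin_windows))   # [0 2 1 3], same bins as the loop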
self.cursor.execute(sql)\n            results = self.cursor.fetchall()\n            return results\n        except:\n            print(\"Error: unable to fetch data\")\n            return None\n\n    def find_one(self, sql):\n        try:\n            self.cursor.execute(sql)\n            results = self.cursor.fetchone()\n            return results\n        except:\n            print(\"Error: unable to find_one data\")\n            return None\n\nclass RedisClient(object):\n    def __init__(self, host=REDIS_HOST, port=REDIS_PORT):\n        self.client = redis.Redis(host=host, port=port)\n\n    def push(self, redis_key, redis_res):\n        try:\n            print('pushing...' + str(redis_key) + '...' + str(redis_res))\n            self.client.rpush(redis_key, redis_res)\n        except:\n            print('push failed: ' + str(redis_res))\n            return None\n\n    def pop(self, redis_key):\n        try:\n            results = self.client.blpop(redis_key, timeout=5)\n            return results\n        except:\n            print(\"pop Error: unable to fetch data\")\n            return None\n\n    def llen(self, redis_key):\n        try:\n            results = self.client.llen(redis_key)\n            return results\n        except:\n            print(\"llen Error: unable to fetch data\")\n            return None\n\nif __name__ == '__main__':\n    conn = MysqlClient()\n    # save() expects a SQL string, not a dict; 'user_info' is a placeholder table name\n    sql = \"INSERT INTO user_info (user_id, aa) VALUES (1266321801, 'aaga')\"\n    conn.save(sql)\n","sub_path":"bossZP_spss/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"470256097","text":"# SortTesting\n# MAIN:\n# https://en.wikipedia.org/wiki/Sorting_algorithm\n\n# Now we must return/print mylist, sorted from least to greatest...\n# I could just google it, but it will be more fun to figure it out myself\n# google: sorting algorithms python is awfully tempting...\n\n# Clearly, a loop and some if statements are needed\n\n# If I remember, sorting a list is in O(nlogn) time\n# Insertion sort has O(n) running time\n# Never mind, I am just trying to be fancy - look it up on wikipedia...\n\n# USEFUL: len(listname) returns length of list, as in the number of entries\n# USEFUL: listname.insert(a, b) inserts b at index a\n# NOTE: To change, say, entry0, just say mylist[0] = 4\n# May be easier to append, though (see lab 2)\n# USEFUL: More on lists:\n# https://docs.python.org/3.5/tutorial/datastructures.html\n\n# NOTE: TRY TO IMPLEMENT AN INSERTION SORT AND A SELECTION SORT\n\n# A simple list - need to find out how to do arrays as well,\n# but this does just fine...\n\n\n\ndef listSort(listIn):\n    listOut = []\n    for x in range(0, len(listIn)):\n        needAppend = True\n        for y in range(0, len(listOut)):\n            if listIn[x] <= listOut[y]: #may need to make the <= a <, we will see\n                listOut.insert(y, listIn[x]) #insert(index, value): position comes first\n                needAppend = False #in Python, boolean values need to be capitalized\n                break #stop at the first valid slot, or we insert on every later pass\n        if needAppend == True:\n            listOut.insert(len(listOut), listIn[x])\n    print(listOut) # for debugging purposes\n    #return listOut\n\n\nmylist = [3, 6, 2, 5, 9, 0, 4, 1, 8, 7]\n\nlistSort(mylist)\n\n    \n    # First we have a main loop that runs through integers between 1 and\n    # length of list, to take each element of the list and run it through\n    # the nested loop\n    # Now we need another loop within to take each element of\n    # listIn(each run through main loop) and find out where it\n    # goes in listOut\n    # Since listOut is sorted, simply go through each element of listout,\n    # compare, and insert at proper position when the certain element of\n    # listIn being tested is less than the next one, or we reach the end of the\n    # list, and we just insert it at the end\n\n    # This seems to be an implementation of an insertion sort\n    # afterwards, google other 
implementations...\n\n\n\n\n\n","sub_path":"python/SortTesting.py","file_name":"SortTesting.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"261124243","text":"import numpy as np\r\nimport math\r\nfrom sympy import *\r\n\r\n\r\n\r\ndef sigmoid(x):\r\n return 1/(1+math.exp(-x))\r\n\r\n\r\ni =[0.05,0.10]\r\no=[0.01,0.99]\r\nw=[0.15,.2,.25,.30,.40,.45,.5,.55]\r\nb =[.35,.6]\r\nc = o[0]\r\nnet_h1= w[0]*i[0]+w[1]*i[1]+b[0]\r\n\r\nout_h1=sigmoid(net_h1)\r\n\r\n\r\nnet_h2= w[2]*i[0]+w[3]*i[1]+b[0]\r\n\r\nout_h2=sigmoid(net_h2)\r\n\r\n\r\nnet_o1 = out_h1*w[4]+w[5]*out_h2+b[1]\r\n\r\nout_o1= sigmoid(net_o1)\r\na=out_o1\r\n\r\n\r\nnet_o2 = out_h2*w[7]+w[6]*out_h1+b[1]\r\n\r\nout_o2= sigmoid(net_o2)\r\n\r\nEo1 = ((out_o1-o[0])**2)/2\r\nEo2 = ((out_o2-o[1])**2)/2\r\nEtot = Eo1+Eo2\r\n\r\nfrom sympy import symbols, diff,solve\r\nfrom sympy import Symbol,Derivative\r\n\r\nout_o1 = Symbol('out_o1')\r\nf = ((out_o1-o[0])**2)/2\r\nderiv = Derivative(f,out_o1)\r\nd=deriv.doit().subs({out_o1:a},{o[0]:c})\r\n\r\n\r\n\r\np1=1/(1+math.exp(-net_o1))\r\np2= p1*(1-p1)\r\n\r\nnet_o1, w[4] = symbols('net_o1 w[4]', real=True)\r\nf = out_h1*w[4]+w[5]*out_h2+b[1]\r\np3 = diff(f, w[4])\r\n\r\n","sub_path":"Neural.py","file_name":"Neural.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"581756795","text":"#!/usr/bin/env python\n\nimport time\nimport sqlite3\nimport os\nimport sys\nimport random\nimport codecs\nimport eyed3\nimport subprocess\nimport re\nimport curses\nimport threading\nfrom collections import deque\n\nshow_intp = True\ntemporary_show = False\ncurrent_dir = os.path.dirname(__file__) or '.'\n\n\ndef listening():\n key = ''\n while True:\n key = chr(stdscr.getch())\n global show_intp\n if key == 'q':\n curses.endwin()\n os._exit(0)\n elif key == ' ':\n global auto_flag\n if auto_flag.isSet():\n auto_flag.clear()\n else:\n auto_flag.set()\n elif key == 'd':\n if not temporary_show:\n # show interpret but not pronouce\n readword(pronounce=False, show_intp_fore=True)\n elif not auto_flag.isSet():\n # show interpret and pronouce\n readword(auto=False, show_intp_fore=True)\n elif key == 'e':\n show_intp = not show_intp\n readword(pronounce=False)\n elif key == 'w':\n remember() # delete -1 word\n elif key == 'r':\n refresh(True)\n\n elif key in ['f', 's', 'a']:\n manual(key)\n\n\ndef readword(auto=True, pronounce=True, show_intp_fore=False):\n word, intp = words[-1]\n word = word.strip()\n stdscr.clear()\n stdscr.addstr(word, curses.color_pair(1) | curses.A_BOLD)\n global temporary_show\n if show_intp or show_intp_fore:\n temporary_show = True\n stdscr.addstr(re.sub(r'^(?:\\[.*?\\])?\\n+|(? 
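listSort above hand-rolls an insertion sort; the standard library already provides the sorted-insert primitive it is reaching for:

import bisect

def insertion_sort(items):
    out = []
    for item in items:
        bisect.insort(out, item)   # inserts item keeping out sorted
    return out

print(insertion_sort([3, 6, 2, 5, 9, 0, 4, 1, 8, 7]))

Neural.py above mixes numeric math.exp with symbolic sympy derivatives; staying symbolic is enough to obtain the backprop factors it wants, e.g. the sigmoid derivative:

from sympy import symbols, exp, diff, simplify

x = symbols('x')
sigmoid = 1 / (1 + exp(-x))
dsig = simplify(diff(sigmoid, x))
print(dsig)                                       # the familiar s*(1 - s), in exponential form
print(simplify(dsig - sigmoid * (1 - sigmoid)))   # 0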
0:\n\t\t\t\trecord.total_area = reduce(lambda x,y: x+y,areas)\n\t\t\telse:\n\t\t\t\trecord.total_area = 0\n\t\n\t@api.depends('line_ids')\n\tdef _get_count_crystals(self):\n\t\tfor record in self:\n\t\t\trecord.count_total_crystals = len(record.line_ids.mapped('lot_line_id').filtered(lambda x:not x.is_break))\n\n\t@api.depends('line_ids')\n\tdef _get_total_area_breaks(self):\n\t\tfor record in self:\n\t\t\tline_ids = record.line_ids.mapped('lot_line_id').filtered(lambda x: x.is_break)\n\t\t\tif len(line_ids) > 0:\n\t\t\t\trecord.total_area_breaks=reduce(lambda x,y: x+y,line_ids.mapped('area'))\n\t\t\telse:\n\t\t\t\trecord.total_area_breaks = 0\n\n\t@api.depends('line_ids')\n\tdef _get_count_breaks(self):\n\t\tfor record in self:\n\t\t\trecord.count_total_breaks = len(record.line_ids.mapped('lot_line_id').filtered(lambda x: x.is_break))\n\n\t@api.depends('total_area','total_area_breaks')\n\tdef _get_percentage_breaks(self):\n\t\tfor record in self:\n\t\t\tif record.total_area > 0:\n\t\t\t\trecord.percentage_breaks=(record.total_area_breaks/record.total_area)*100\n\t\t\telse:\n\t\t\t\trecord.percentage_breaks=0\n\t\n\t@api.depends('line_ids')\n\tdef _get_for_delivery(self):\n\t\tfor record in self:\n\t\t\trecord.for_delivery = len(record.line_ids.mapped('lot_line_id').filtered(lambda x: not x.entregado and not x.is_break))\n\n\t@api.multi\n\tdef makelist(self):\n\t\tself.ensure_one()\n\t\tlin = self.env['tracing.production.stock.line.lot'].search([])\n\t\tfor l in lin:\n\t\t\tl.unlink()\n\n\t\tlines=[]\n\t\tif self.invoice_id and self.search_param == 'invoice':\n\t\t\tinvoice_lines = self.invoice_id.invoice_line_ids\n\t\t\tsale_order_lines = invoice_lines.mapped('sale_line_ids')\n\t\t\tsale_order = sale_order_lines.mapped('order_id')\n\t\t\tif len(set(sale_order)) == 1:\n\t\t\t\tlines = sale_order.op_ids.mapped('line_ids').mapped('lot_line_id')\n\t\t\t\taux_lines = sale_order.op_ids.mapped('line_ids')\n\t\t\t\tlines = self._get_data(lines)\n\t\t\tif self.show_breaks:\n\t\t\t\tglass_breaks = self.env['glass.lot.line'].search([('order_line_id','in',aux_lines.ids),('is_break','=',True)])\n\t\t\t\tlines += glass_breaks\n\n\t\telif self.order_id and self.search_param == 'glass_order':\n\t\t\tlines=(self.order_id.line_ids.filtered(lambda x:x.lot_line_id)).mapped('lot_line_id')\n\t\t\tlines = self._get_data(lines)\n\t\t\tif self.show_breaks:\n\t\t\t\tglass_breaks = self.env['glass.lot.line'].search([('order_line_id','in',self.order_id.line_ids.ids),('is_break','=',True)])\n\t\t\t\tlines += glass_breaks\n\n\t\telif self.customer_id and self.search_param == 'customer':\n\t\t\tsale_orders = self.env['sale.order'].search([('partner_id','=',self.customer_id.id)])\n\t\t\tlines = sale_orders.mapped('op_ids').mapped('line_ids').mapped('lot_line_id')\n\t\t\tlines = self._get_data(lines)\n\t\t\tif self.show_breaks:\n\t\t\t\tglass_breaks = self.env['glass.lot.line'].search([('order_line_id','in',sale_orders.mapped('op_ids').mapped('line_ids').ids),('is_break','=',True)])\n\t\t\t\tlines += glass_breaks\n\n\t\tif len(lines)==0:\n\t\t\traise exceptions.Warning('NO SE HA ENCONTRADO INFORMACION.\\nEs posible que los cristales aun no hayan iniciado el proceso de produccion')\n\n\t\tfor line in 
lines:\n\t\t\tself.env['tracing.production.stock.line.lot'].create({\n\t\t\t\t'order_id':line.order_prod_id.id,\n\t\t\t\t'crystal_number':line.nro_cristal,\n\t\t\t\t'base1':line.base1,\n\t\t\t\t'base2':line.base2,\n\t\t\t\t'altura1':line.altura1,\n\t\t\t\t'altura2':line.altura2,\n\t\t\t\t'customer_id':line.order_prod_id.partner_id.id,\n\t\t\t\t'templado':line.templado,\n\t\t\t\t'ingresado':line.ingresado,\n\t\t\t\t'entregado':line.entregado,\n\t\t\t\t'arenado':line.arenado, # pendiente de cambio\n\t\t\t\t'embalado':False,\n\t\t\t\t'decorator':'break' if line.is_break else 'default',\n\t\t\t\t'parent_id':self.id,\n\t\t\t\t'lot_line_id':line.id,\n\t\t\t\t'is_break': line.is_break,\n\t\t\t\t})\n\t\tself.write({\n\t\t'tot_templado':len(list(filter(lambda x:x.templado and not x.is_break,lines))),\n\t\t'tot_ingresado':len(list(filter(lambda x:x.ingresado and not x.is_break,lines))),\n\t\t'tot_entregado':len(list(filter(lambda x:x.entregado and not x.is_break,lines))),\n\t\t'tot_arenado':len(list(filter(lambda x:x.arenado and not x.is_break,lines))),\n\t\t})\n\t\treturn True\n\n\t@api.multi\n\tdef _get_data(self,lot_lines):\n\t\tif self.filter_field:\n\t\t\tif self.filter_field == 'all':\n\t\t\t\tpass\n\t\t\telif self.filter_field == 'pending':\n\t\t\t\tlot_lines = lot_lines.filtered(lambda x:x.templado==False)\n\t\t\telif self.filter_field == 'produced':\n\t\t\t\tlot_lines = lot_lines.filtered(lambda x:x.templado==True)\n\t\t\telif self.filter_field == 'to inter':\n\t\t\t\tlot_lines = lot_lines.filtered(lambda x:x.templado==True and x.ingresado==False)\n\t\t\telif self.filter_field == 'to deliver':\n\t\t\t\tlot_lines = lot_lines.filtered(lambda x:x.ingresado==True and x.entregado==False)\n\t\t\telif self.filter_field == 'expired':\n\t\t\t\tnow = datetime.now().date()\n\t\t\t\tlot_lines = lot_lines.filtered(lambda x: self._str2date(x.order_prod_id.date_delivery) < now and x.templado == False)\n\t\tif self.date_ini and self.date_end and self.search_param == 'customer':\n\t\t\tstart = self._str2date(self.date_ini)\n\t\t\tend = self._str2date(self.date_end)\n\t\t\tlot_lines = lot_lines.filtered(lambda x: self._str2date(x.order_date_prod) < end and self._str2date(x.order_date_prod) > start)\n\t\tif not self.show_breaks:\n\t\t\tlot_lines = lot_lines.filtered(lambda x: not x.is_break)\n\t\treturn list(set(lot_lines))\n\n\tdef _str2date(self,string):\n\t\treturn datetime.strptime(string.replace('-',''),\"%Y%m%d\").date()\n\nclass Tracing_Production_Stock_Line_Lot(models.Model):\n\t_name = 'tracing.production.stock.line.lot'\n\n\tparent_id = fields.Many2one('glass.tracing.production.stock','Main')\n\torder_id = fields.Many2one('glass.order','Orden produccion')\n\tlot_line_id = fields.Many2one('glass.lot.line','Linea de lote')\n\tproduct_name = fields.Char('Producto',related='lot_line_id.order_line_id.product_id.name')\n\tcustomer_id = fields.Many2one('res.partner','Cliente')\n\tcrystal_number = fields.Integer('Nro. 
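_str2date above strips the dashes before parsing; strptime can consume them directly with a dashed format string (equivalent, stdlib only):

from datetime import datetime

s = '2024-05-01'
print(datetime.strptime(s.replace('-', ''), '%Y%m%d').date())   # pattern above
print(datetime.strptime(s, '%Y-%m-%d').date())                  # same result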
cristal')\n\tbase1=fields.Integer('Base1')\n\tbase2=fields.Integer('Base2')\n\taltura1=fields.Integer('Altura1')\n\taltura2=fields.Integer('Altura2')\n\tarenado = fields.Boolean('Arena')\n\tembalado = fields.Boolean('Embalado')\n\ttemplado=fields.Boolean('Templado')\n\tingresado=fields.Boolean('Ingresado') \n\tentregado=fields.Boolean('Entregado') \n\tdecorator = fields.Selection([('default','default'),('break','break'),('without_lot','without_lot')],default='default')\n\tis_break = fields.Boolean('Roto')\n\n\t@api.multi\n\tdef show_detail_tracing_line(self):\n\t\tview = self.env.ref('glass_production_order.show_detail_tracing_line_wizard_form', False)\n\t\twizard = self.env['show.detail.tracing.line.wizard'].create({'lot_line_id':self.lot_line_id.id})\n\t\treturn{\n\t\t\t'name': 'Detalle de Seguimiento',\n\t\t\t'res_id': wizard.id,\n\t\t\t'view_type': 'form',\n\t\t\t'view_mode': 'form',\n\t\t\t'res_model': 'show.detail.tracing.line.wizard',\n\t\t\t'view_id': view.id,\n\t\t\t'type': 'ir.actions.act_window',\n\t\t\t'target': 'new',\n\t\t} \n\n","sub_path":"Modulos/glass_production_order/models/glass_tracing_production_stock.py","file_name":"glass_tracing_production_stock.py","file_ext":"py","file_size_in_byte":9951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"463306610","text":"### Tools\n__all__ = [\"Dtool_ObjectToDict\", \"Dtool_funcToMethod\", \"Dtool_PreloadDLL\"]\n\nimport imp, sys, os\n\n# The following code exists to work around a problem that exists\n# with Python 2.5 or greater.\n\n# Specifically, Python 2.5 is designed to import files named *.pyd\n# only; it will not import files named *.dll (or *.so). We work\n# around this problem by explicitly preloading all of the dll's we\n# expect to need.\n\ndll_suffix = ''\nif sys.platform == \"win32\":\n # On Windows, dynamic libraries end in \".dll\".\n dll_ext = '.dll'\n module_ext = '.pyd'\n\n # We allow the caller to preload dll_suffix into the sys module.\n dll_suffix = getattr(sys, 'dll_suffix', None)\n\n if dll_suffix is None:\n # Otherwise, we try to determine it from the executable name:\n # python_d.exe implies _d across the board.\n dll_suffix = ''\n if sys.executable.endswith('_d.exe'):\n dll_suffix = '_d'\n \nelif sys.platform == \"darwin\":\n # On OSX, the dynamic libraries usually end in .dylib, but\n # sometimes we need .so.\n try:\n from direct.extensions_native.extensions_darwin import dll_ext\n except ImportError:\n dll_ext = '.dylib'\n module_ext = '.so'\nelse:\n # On most other UNIX systems (including linux), .so is used.\n dll_ext = '.so'\n module_ext = '.so'\n\nif sys.platform == \"win32\":\n # On Windows, we must furthermore ensure that the PATH is modified\n # to locate all of the DLL files.\n\n # First, search for the directory that contains all of our compiled\n # modules.\n target = None\n filename = \"libpandaexpress%s%s\" % (dll_suffix, dll_ext)\n for dir in sys.path + [sys.prefix]:\n lib = os.path.join(dir, filename)\n if (os.path.exists(lib)):\n target = dir\n if target == None:\n message = \"Cannot find %s\" % (filename)\n raise ImportError(message)\n\n # And add that directory to the system path.\n path = os.environ[\"PATH\"]\n if not path.startswith(target + \";\"):\n os.environ[\"PATH\"] = target + \";\" + path\n\ndef Dtool_FindModule(module):\n # Finds a .pyd module on the Python path.\n filename = module.replace('.', os.path.sep) + module_ext\n for dir in sys.path:\n lib = os.path.join(dir, filename)\n if (os.path.exists(lib)):\n return 
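The computed fields in the model above guard reduce() with a length check; the builtin sum() already handles the empty case, which removes the branch (sketch):

from functools import reduce

areas = []
total = reduce(lambda x, y: x + y, areas) if areas else 0   # pattern above
print(total == sum(areas))   # True: sum() covers the empty list for free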
lib\n\n return None\n\ndef Dtool_PreloadDLL(module):\n if module in sys.modules:\n return\n\n # First find it as a .pyd module on the Python path.\n if Dtool_FindModule(module):\n # OK, we should have no problem importing it as is.\n return\n\n # Nope, we'll need to search for a dynamic lib and preload it.\n # Search for the appropriate directory.\n target = None\n filename = module.replace('.', os.path.sep) + dll_suffix + dll_ext\n for dir in sys.path + [sys.prefix]:\n lib = os.path.join(dir, filename)\n if (os.path.exists(lib)):\n target = dir\n break\n\n if target is None:\n message = \"DLL loader cannot find %s.\" % (module)\n raise ImportError(message)\n\n # Now import the file explicitly.\n pathname = os.path.join(target, filename)\n imp.load_dynamic(module, pathname) \n\n# Nowadays, we can compile libpandaexpress with libpanda into a\n# .pyd file called panda3d/core.pyd which can be imported without\n# any difficulty. Let's see if this is the case.\n\n# In order to support things like py2exe that play games with the\n# physical python files on disk, we can't entirely rely on\n# Dtool_FindModule to find our panda3d.core module. However, we\n# should be able to import it. To differentiate the old-style Panda\n# build (with .dll's) from the new-style Panda build (with .pyd's), we\n# first try to import panda3d.core directly; if it succeeds we're in a\n# new-style build, and if it fails we must be in an old-style build.\ntry:\n from panda3d.core import *\nexcept ImportError:\n Dtool_PreloadDLL(\"libpandaexpress\")\n from libpandaexpress import *\n\ndef Dtool_ObjectToDict(cls, name, obj):\n cls.DtoolClassDict[name] = obj;\n\ndef Dtool_funcToMethod(func, cls, method_name=None):\n \"\"\"Adds func to class so it is an accessible method; use method_name to specify the name to be used for calling the method.\n The new method is accessible to any instance immediately.\"\"\"\n if sys.version_info < (3, 0):\n func.im_class = cls\n func.im_func = func\n func.im_self = None\n if not method_name:\n method_name = func.__name__\n cls.DtoolClassDict[method_name] = func;\n","sub_path":"build/nirai/panda3d/direct/src/extensions_native/extension_native_helpers.py","file_name":"extension_native_helpers.py","file_ext":"py","file_size_in_byte":4505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"296493649","text":"'''\r\nDeveloper: Furkan Sürücü\r\nPurpose of Software: Reinforcement of learned python code and self-improvement\r\nWhat does program do?: The transcription of an input number with two digits.\r\n'''\r\nnum1_to_9 = {1: 'One', 2: 'Two', 3: 'Three', 4: 'Four', 5: 'Five', \\\r\n 6: 'Six', 7: 'Seven', 8: 'Eight', 9: 'Nine'}\r\nnum10_to_19 = {10: 'Ten', 11: 'Eleven', 12: 'Twelve', 13: 'Thirteen', 14: 'Fourteen', \\\r\n 15: 'Fifteen', 16: 'Sixteen', 17: 'Seventeen', 18: 'Eighteen', 19: 'Nineteen'}\r\nnum20_to_90 = ['Twenty','Thirty','Fourty','Fifty','Sixty','Seventy','Eighty','Ninety']\r\n\r\ndef reading_number(num):\r\n\r\n if num == 0:\r\n return \"zero\"\r\n elif 1 <= num <= 9:\r\n return num1_to_9[num]\r\n elif 10 <= num <= 19:\r\n return num10_to_19[num]\r\n elif 20 <= num <= 99:\r\n if num % 10 == 0:\r\n return num20_to_90[(num//10)-2]\r\n else:\r\n x = num20_to_90[(num//10)-2] +\" \"+ num1_to_9[num%10]\r\n return x\r\n else:\r\n return \"Out of range\"\r\nsayi = int(input(\"Enter a 
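Dtool_PreloadDLL above leans on imp.load_dynamic, which was removed in Python 3.12; the importlib equivalent, written against the same module/pathname variables (a sketch):

import sys
import importlib.util

def load_dynamic(module, pathname):
    spec = importlib.util.spec_from_file_location(module, pathname)
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    sys.modules[module] = mod
    return mod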
number:\\n\"))\r\nprint(sayi,\"------->\",reading_number(sayi))","sub_path":"reading_number.py","file_name":"reading_number.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"253210151","text":"n, m = map(int, input().split())\nnlist = [False] * (n + 1)\nnewlist = [0] * m\n\n\ndef back(index, n, m):\n if index == m:\n print(*newlist)\n return\n\n for i in range(1, n+1):\n if nlist[i]:\n continue\n nlist[i] = True\n newlist[index] = i\n back(index+1, n, m)\n nlist[i] = False\n\n\nback(0, n, m)\n","sub_path":"2020.07~/15649.py","file_name":"15649.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"191640889","text":"import time\nimport numpy as np\nfrom sklearn.metrics import f1_score\n\n\ndef disp_elapsed(t0):\n elapsed_seconds = time.time() - t0\n if elapsed_seconds < 60:\n print(\"Done: {0:.1f} seconds\".format(elapsed_seconds))\n elif elapsed_seconds < 3600:\n print(\"Done: {0:.1f} minutes\".format(elapsed_seconds / 60))\n else:\n print(\"Done: {0:.1f} hours\".format(elapsed_seconds / (60 * 60)))\n\n\ndef f1_best(y, pred, thresh_s=None):\n if thresh_s is None:\n thresh_s = np.linspace(0.1, 0.6, 41)\n best_f1 = 0\n best_thresh = 0\n for thresh in thresh_s:\n f1 = f1_score(y, (pred > thresh).astype(int))\n if f1 > best_f1:\n best_f1 = f1\n best_thresh = thresh\n return best_f1, best_thresh\n","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"341924967","text":"# -*- coding: utf-8 -*\r\n\r\nimport cv2\r\nimport numpy as np\r\nfrom chainer import Chain, serializers\r\nimport chainer.functions as F\r\nimport chainer.links as L\r\n\r\nclass MyMLP(Chain):\r\n def __init__(self, n_in=784, n_units=100, n_out=10):\r\n super(MyMLP, self).__init__(\r\n l1=L.Linear(n_in, n_units),\r\n l2=L.Linear(n_units, n_units),\r\n l3=L.Linear(n_units, n_out),\r\n )\r\n def __call__(self, x):\r\n h1 = F.relu(self.l1(x))\r\n h2 = F.relu(self.l2(h1))\r\n y = self.l3(h2)\r\n return y\r\n\r\ndef preprocessing(img):\r\n img = img[190:290,270:370]\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n img = cv2.GaussianBlur(img, (3, 3), 0)\r\n img = cv2.resize(img, (28, 28))\r\n res, img = cv2.threshold(img, 130, 255, cv2.THRESH_BINARY_INV)\r\n img = img.astype(np.float32) / 255\r\n img = np.array(img).reshape(1, 784)\r\n return img\r\n\r\ndef main():\r\n model = MyMLP()\r\n serializers.load_npz('mymodel.npz', model)\r\n\r\n capture = cv2.VideoCapture(0)\r\n if capture.isOpened() is False:\r\n raise(\"IO Error\")\r\n\r\n while True:\r\n ret, image = capture.read()\r\n if ret == False:\r\n continue\r\n\r\n cv2.rectangle(image, (269,189), (371,291), (0,0,255), 1)\r\n cv2.imshow(\"Capture\", image)\r\n k = cv2.waitKey(10)\r\n\r\n if k == ord('e'):\r\n img = preprocessing(image)\r\n num = model(img)\r\n print(num.data)\r\n print(np.argmax(num.data))\r\n\r\n if k == ord('q'):\r\n break\r\n capture.release()\r\n cv2.destroyAllWindows()\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"capture_mnist.py","file_name":"capture_mnist.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"535819342","text":"#!/usr/bin/env python2.7\n\nimport argparse\nimport sys\nfrom argparse import 
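15649.py above generates the length-m permutations of 1..n by explicit backtracking; itertools yields the same stream, in the same order, directly:

from itertools import permutations

n, m = 3, 2
for p in permutations(range(1, n + 1), m):
    print(*p)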
+{"seq_id":"535819342","text":"#!/usr/bin/env python2.7\n\nimport argparse\nimport sys\nfrom argparse import FileType\n\n\ndef parse_header(spec_lines):\n # M I L O A\n # aag 25 6 0 1 19\n # 0 1 2 3 4 5\n header_tokens = spec_lines[0].strip().split()\n M = int(header_tokens[1])\n nof_inputs = int(header_tokens[2])\n L = int(header_tokens[3])\n nof_outputs = int(header_tokens[4])\n A = int(header_tokens[5])\n assert M == L + nof_inputs + A, \\\n 'M is not the sum of I, L, A'\n return nof_inputs, nof_outputs\n\n\ndef get_inputs(spec_lines):\n nof_inputs, _ = parse_header(spec_lines)\n input_lines = spec_lines[1:nof_inputs+1]\n return set(int(l.strip()) for l in input_lines)\n\n\ndef is_input_symbol_table(l):\n # i0 i_1\n if l.strip().startswith('i'):\n tokens = l.split()\n wo_i = tokens[0][1:]\n try:\n int(wo_i)\n return True\n except ValueError:\n return False\n return False\n\n\ndef get_input_symbols(spec_lines):\n start = None\n end = None\n for i, l in enumerate(spec_lines):\n if l.strip()[0] == 'i' and not start:\n start = i\n if l.strip() == 'c':\n end = i\n break\n\n symbol_table_lines = spec_lines[start:end]\n symbol_table = [l for l in symbol_table_lines if is_input_symbol_table(l)]\n return symbol_table\n\n\ndef get_control_inputs(orig_spec_lines):\n control_inputs = set()\n input_symbols = get_input_symbols(orig_spec_lines)\n for s in input_symbols:\n # i0 i_1\n # i1 controllable_1\n tokens = s.strip().split()\n if tokens[1].startswith('controllable'):\n input_index = int(tokens[0][1:])\n input_literal = int(orig_spec_lines[1+input_index])\n control_inputs.add(input_literal)\n return control_inputs\n\n\ndef get_index_of_last_definition(spec_lines):\n for i, l in enumerate(spec_lines):\n if l.strip().startswith('i'):\n return i\n assert 0\n\n\ndef get_uncontrol_definitions(control_inputs, spec_lines):\n # TODO: what happens if some signal is short-circuited?\n uncontrol_definitions = set()\n\n nof_inputs, nof_outputs = parse_header(spec_lines)\n definitions_only = spec_lines[(nof_inputs + nof_outputs):\n get_index_of_last_definition(spec_lines)]\n for d in definitions_only:\n int_tokens = set(int(x) for x in d.strip().split())\n if int_tokens.isdisjoint(control_inputs):\n uncontrol_definitions.add(d)\n return uncontrol_definitions\n\n\ndef check_valid_metadata(spec_lines):\n in_comment = False\n for l in spec_lines:\n if '#!SYNTCOMP' in l:\n assert not in_comment, 'Invalid nesting of metadata labels'\n in_comment = True\n continue\n elif '#.' in l:\n assert in_comment, 'Metadata end-label does not have a start'\n in_comment = False\n continue\n assert not in_comment, 'Metadata labels not closed'\n\n\ndef main(original_lines, synthesized_lines):\n check_valid_metadata(original_lines)\n orig_all_inputs = get_inputs(original_lines)\n orig_control_inputs = get_control_inputs(original_lines)\n orig_uncontrol_inputs = orig_all_inputs.difference(orig_control_inputs)\n onof_inputs, onof_outputs = parse_header(original_lines)\n\n assert onof_outputs == 1, \\\n 'More than one output defined!'\n assert orig_uncontrol_inputs, \\\n 'There are no uncontrollable inputs!'\n assert orig_control_inputs, \\\n 'There are no controllable inputs!'\n\n # loading information about the synthesis output\n check_valid_metadata(synthesized_lines)\n synthd_all_inputs = get_inputs(synthesized_lines)\n synthd_control_inputs = get_control_inputs(synthesized_lines)\n synthd_uncontrol_inputs = \\\n synthd_all_inputs.difference(synthd_control_inputs)\n snof_inputs, snof_outputs = parse_header(synthesized_lines)\n\n assert snof_outputs == 1, \\\n 'More than one output defined!'\n assert synthd_uncontrol_inputs, \\\n 'There are no uncontrollable inputs!'\n assert len(synthd_uncontrol_inputs) == len(orig_uncontrol_inputs), \\\n 'The no. of uncontrollable inputs does not match after synthesis'\n assert len(synthd_control_inputs) == 0, \\\n 'There are controllable inputs left!'\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('original', type=FileType())\n parser.add_argument('synthesized', type=FileType())\n args = parser.parse_args(sys.argv[1:])\n main(list(args.original.readlines()), list(args.synthesized.readlines()))\n","sub_path":"eAAG/synt-post-processor/syntax_check.py","file_name":"syntax_check.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"331865306","text":"import os\nimport sys\nimport argparse\nimport numpy as np\nimport theano.tensor as T\n\nhomepath = os.path.join('..', '..')\n\nif not homepath in sys.path:\n sys.path.insert(0, homepath)\n\nfrom dlearn.models.layer import FullConnLayer, ConvPoolLayer\nfrom dlearn.models.nnet import NeuralNet\nfrom dlearn.utils import actfuncs, costfuncs\nfrom dlearn.utils.serialize import load_data, save_data\nfrom dlearn.optimization import sgd\n\n\n# Program arguments parser\ndesctxt = \"\"\"\nTrain latent network. Use learned attribute and segmentation network.\n\"\"\"\n\ndataset_txt = \"\"\"\nThe input dataset data_name.pkl.\n\"\"\"\n\nattr_txt = \"\"\"\nThe attribute network model_name.pkl.\n\"\"\"\n\nseg_txt = \"\"\"\nThe segmentation network model_name.pkl.\n\"\"\"\n\noutput_txt = \"\"\"\nIf not specified, the output model will be saved as model_latent.pkl.\nOtherwise it will be saved as model_latent_name.pkl.\n\"\"\"\n\nparser = argparse.ArgumentParser(description=desctxt)\nparser.add_argument('-d', '--dataset', nargs=1, required=True,\n metavar='name', help=dataset_txt)\nparser.add_argument('-a', '--attribute', nargs=1, required=True,\n metavar='name', help=attr_txt)\nparser.add_argument('-s', '--segmentation', nargs=1, required=True,\n metavar='name', help=seg_txt)\nparser.add_argument('-o', '--output', nargs='?', default=None,\n metavar='name', help=output_txt)\n\nargs = parser.parse_args()\n\n\ndef train_model(dataset, attr_model, seg_model):\n\n def shape_constrained_pooling(fmaps):\n s = fmaps.sum(axis=[2, 3])\n Z = abs(actfuncs.tanh(fmaps)).sum(axis=[2, 3])\n return s / Z\n\n X = T.tensor4()\n A = T.matrix()\n\n feature_layers = []\n feature_layers.append(ConvPoolLayer(\n input=X,\n input_shape=(3, 160, 80),\n filter_shape=(32, 3, 5, 5),\n pool_shape=(2, 2),\n active_func=actfuncs.tanh,\n flatten=False,\n W=attr_model.blocks[0]._W,\n b=0.0\n ))\n\n feature_layers.append(ConvPoolLayer(\n input=feature_layers[-1].output,\n input_shape=feature_layers[-1].output_shape,\n filter_shape=(64, 32, 5, 5),\n pool_shape=(2, 2),\n active_func=actfuncs.tanh,\n flatten=False,\n W=attr_model.blocks[1]._W,\n b=0.0\n ))\n\n seg_layers = []\n seg_layers.append(FullConnLayer(\n input=feature_layers[-1].output.flatten(2),\n input_shape=np.prod(feature_layers[-1].output_shape),\n output_shape=1024,\n dropout_ratio=0.1,\n active_func=actfuncs.tanh,\n W=seg_model.blocks[2]._W,\n b=seg_model.blocks[2]._b\n ))\n\n seg_layers.append(FullConnLayer(\n input=seg_layers[-1].output,\n input_shape=seg_layers[-1].output_shape,\n output_shape=37 * 17,\n dropout_input=seg_layers[-1].dropout_output,\n active_func=actfuncs.sigmoid,\n W=seg_model.blocks[3]._W,\n b=seg_model.blocks[3]._b\n ))\n\n S = seg_layers[-1].output\n S = S * (S >= 0.1)\n S = S.reshape((S.shape[0], 37, 17))\n S = S.dimshuffle(0, 'x', 1, 2)\n\n S_dropout = seg_layers[-1].dropout_output\n S_dropout = S_dropout * (S_dropout >= 0.1)\n S_dropout = S_dropout.reshape((S_dropout.shape[0], 37, 17))\n S_dropout = S_dropout.dimshuffle(0, 'x', 1, 2)\n\n attr_layers = []\n '''\n attr_layers.append(ConvPoolLayer(\n input=feature_layers[-1].output * S,\n input_shape=feature_layers[-1].output_shape,\n filter_shape=(128, 64, 3, 3),\n pool_shape=(2, 2),\n dropout_input=feature_layers[-1].output * S_dropout,\n active_func=actfuncs.tanh,\n flatten=False,\n W=attr_model.blocks[2]._W,\n b=0.0\n ))\n '''\n\n attr_layers.append(FullConnLayer(\n input=shape_constrained_pooling(feature_layers[-1].output * S),\n input_shape=feature_layers[-1].output_shape,\n output_shape=64,\n dropout_input=shape_constrained_pooling(\n feature_layers[-1].dropout_output * S_dropout),\n dropout_ratio=0.1,\n active_func=actfuncs.tanh,\n W=attr_model.blocks[2]._W,\n b=attr_model.blocks[2]._b\n ))\n\n attr_layers.append(FullConnLayer(\n input=attr_layers[-1].output,\n input_shape=attr_layers[-1].output_shape,\n output_shape=11,\n dropout_input=attr_layers[-1].dropout_output,\n active_func=actfuncs.sigmoid,\n W=attr_model.blocks[3]._W,\n b=attr_model.blocks[3]._b\n ))\n\n model = NeuralNet(feature_layers + seg_layers + attr_layers,\n X, attr_layers[-1].output)\n model.target = A\n\n model.cost = costfuncs.binxent(attr_layers[-1].dropout_output, A) + \\\n 1e-3 * model.get_norm(2)\n model.error = costfuncs.binerr(attr_layers[-1].output, A)\n\n sgd.train(model, dataset, lr=1e-3, momentum=0.9,\n batch_size=100, n_epochs=300,\n epoch_waiting=10)\n\n return model\n\n\nif __name__ == '__main__':\n dataset_file = 'data_{0}.pkl'.format(args.dataset[0])\n attr_file = 'model_{0}.pkl'.format(args.attribute[0])\n seg_file = 'model_{0}.pkl'.format(args.segmentation[0])\n out_file = 'model_latent.pkl' if args.output is None else \\\n 'model_latent_{0}.pkl'.format(args.output)\n\n dataset = load_data(dataset_file)\n attr_model = load_data(attr_file)\n seg_model = load_data(seg_file)\n\n model = train_model(dataset, attr_model, seg_model)\n\n save_data(model, out_file)\n","sub_path":"examples/human_sar/net_latent.py","file_name":"net_latent.py","file_ext":"py","file_size_in_byte":5406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"78695765","text":"import argparse\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom flask import Flask, render_template, request\nfrom PIL import Image\nfrom skimage.transform import resize\nfrom tensorflow.python.client import device_lib\n\ndevice_lib.list_local_devices()\ntf.debugging.set_log_device_placement(False)\n\napp = Flask(__name__)\nimage_folder = os.path.join(\"static\", \"images\")\napp.config[\"UPLOAD_FOLDER\"] = image_folder\n\n\n@app.route(\"/\", methods=[\"GET\"])\ndef home():\n return render_template(\"index.html\")\n\n\n@app.route(\"/\", methods=[\"POST\"])\ndef predict():\n\n imagefile = request.files[\"imagefile\"]\n image_path = os.path.join(image_folder, imagefile.filename)\n imagefile.save(image_path)\n\n from tensorflow.keras.preprocessing import image\n\n img = image.load_img(image_path, target_size=(160, 160))\n x = image.img_to_array(img)\n prediction = mdl.predict_image(x)\n\n return render_template(\"index.html\", user_image=image_path, prediction_text=prediction)\n\n\ndef predict_image(image_path, model, class_names, visualize=1):\n\n container = []\n for file_name in os.listdir(image_path):\n container.append(np.array(Image.open(os.path.join(image_path, file_name))))\n\n container = [resize(img, [160, 160]) for img in container]\n # 5 160 160 3\n\n container = np.array(container)\n predictions = model.predict(container * 256)\n predictions = [np.argmax(pred) for pred in predictions]\n\n # np.stack(container).shape\n\n predictions = [class_names[pred] for pred in predictions]\n\n if visualize:\n plt.figure(figsize=(10, 10))\n\n for i in range(len(container)):\n _ = plt.subplot(1, 6, i + 1)\n plt.imshow((container[i] * 256).astype(\"uint8\"))\n plt.title(predictions[i])\n plt.axis(\"off\")\n\n return predictions\n\n\nclass Model_2:\n def __init__(self, model_location, class_name):\n self.model = tf.keras.models.load_model(model_location)\n self.class_name = class_name\n\n def predict_image(self, image):\n self.resized_image = resize(image, [160, 160])\n\n if self.resized_image.max() < 1.1:\n self.scaled_image = self.resized_image * 256\n else:\n self.scaled_image = self.resized_image\n # Needs to be taken out\n\n # If the first dimension of the image is not 1\n if self.scaled_image.shape[0] != 1:\n self.final_image = self.scaled_image[\n None,\n ]\n else:\n self.final_image = self.scaled_image\n pred_value = np.argmax(self.model.predict(self.final_image))\n pred_label = self.class_name[pred_value]\n\n return pred_label\n\n\nif __name__ == \"main\":\n class_name = os.listdir(os.path.join(\"images\", \"train\"))\n mdl = Model_2(\"trained_model\", class_name)\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"642676698","text":"import django_filters.rest_framework\nfrom django.shortcuts import render, get_object_or_404\nfrom rest_framework import viewsets, permissions, filters, status, generics\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom .models import Beer, BeerRating, BeerReview\nfrom .serializers import BeerSerializer, BeerRatingSerializer, BeerReviewSerializer\n\nclass BeerViewSet(viewsets.ModelViewSet):\n queryset = Beer.objects.all()\n serializer_class = BeerSerializer\n filter_backends = (filters.SearchFilter,)\n search_fields = ('name', 'country', 'style')\n\n\nclass BeerRatingListAPIView(APIView):\n def get(self, request, pk, format=None):\n ratings = BeerRating.objects.filter(beer__id=pk)\n serializer = BeerRatingSerializer(ratings,many=True)\n\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n def post(self, request, pk, format=None):\n user = request.user\n if not user.is_authenticated:\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n else:\n beer = get_object_or_404(Beer, pk=pk)\n serializer = BeerRatingSerializer(data=request.data)\n # if serializer.is_valid():\n # serializer.save(creator=user, beer=beer)\n # return Response(data=serializer.data, status=status.HTTP_200_OK)\n print(serializer)\n # else:\n return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass BeerRatingDetailAPIView(APIView):\n def get(self, request, pk, rating_id, format=None):\n user = request.user\n rating = get_object_or_404(BeerRating, pk=rating_id, beer__id=pk, creator=user)\n serializer = BeerRatingSerializer(rating)\n\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n\n def put(self, request, pk, format=None):\n user = request.user\n if not user.is_authenticated:\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n else:\n beer = get_object_or_404(Beer, pk=pk)\n serializer = BeerRatingSerializer()\n if serializer.is_valid():\n serializer.save(beer=beer, creator=user)\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n else:\n return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n user = request.user\n beershop = get_object_or_404(BeerShop, pk=pk)\n\n if beershop.owner == user:\n\n beershop.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n else:\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n","sub_path":"api/beers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"259055462","text":"from tkinter import *\r\nfrom tkinter import messagebox\r\nimport sqlite3 \r\nconn = sqlite3.connect('Database.db')\r\ndef main(p):\r\n p.destroy()\r\n root=Tk()\r\n filename = PhotoImage(file = \"img2.png\")\r\n background_label = Label(root, image=filename)\r\n background_label.place(x=0, y=0, relwidth=1, relheight=1)\r\n root[\"bg\"]=\"red\"\r\n root.minsize(600,500)\r\n root.maxsize(600,500)\r\n button=Button(root,text=\"Sign Up\",command=lambda: form(root),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=65,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n p=Button(root,text=\"Login\",command=lambda: login(root),anchor=CENTER,bg=\"pink\",fg=\"blue\",padx=75,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n button.place(x=200,y=250)\r\n p.place(x=200,y=180)\r\n root.mainloop()\r\n\r\ndef login(p):\r\n p.destroy()\r\n root1=Tk()\r\n filename = PhotoImage(file = \"img4.png\")\r\n background_label = Label(root1, image=filename)\r\n background_label.place(x=0, y=0, relwidth=1, relheight=1)\r\n ac=StringVar()\r\n pn=StringVar()\r\n root1[\"bg\"]=\"#49E3CE\"\r\n root1.minsize(700,800)\r\n root1.maxsize(700,800)\r\n label=Label(root1,text=\"User-Name:\",bg='white',font=(\"Helvetica\", 16),image=filename,compound=CENTER)\r\n label.place(x=150,y=300)\r\n e1=Entry(root1,textvariable=ac)\r\n e1.place(x=360,y=305)\r\n label1=Label(root1,text=\"PIN:\",bg='white',font=(\"Helvetica\", 16))\r\n label1.place(x=150,y=350)\r\n e2=Entry(root1,textvariable=pn,show=\"*\")\r\n e2.place(x=360,y=350)\r\n p=Button(root1,text=\"LOGIN\",command=lambda: valid(root1,ac,pn),anchor=CENTER,bg=\"green\",fg=\"black\",padx=75,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n p.place(x=220,y=430)\r\n root1.mainloop()\r\n\r\n\r\ndef valid(p,ac,pn):\r\n ac=ac.get()\r\n pn=int(pn.get())\r\n c.execute('select * from accounts where uname=\"%s\" AND pi=\"%i\"' % (ac,pn))\r\n l=c.fetchone()\r\n if(l!=None):\r\n home(p,l)\r\n else:\r\n messagebox.showerror(\"Invalid Account\",\"User not Found\")\r\n login(p)\r\n\r\ndef form(p):\r\n p.destroy()\r\n root2=Tk()\r\n filename = PhotoImage(file = \"imgf.png\")\r\n background_label = Label(root2, image=filename)\r\n background_label.place(x=0, y=0, relwidth=1, relheight=1)\r\n root2[\"bg\"]=\"#F0AE59\"\r\n root2.minsize(650,600)\r\n root2.maxsize(650,600)\r\n label1=Label(root2,text=\"Register Account\",font=(\"Helvetica\",32))\r\n label1.place(x=150,y=150)\r\n Name = Label(root2, text=\"Name: \",font=(\"Helvetica\", 16))\r\n Name.place(x=140, y=240)\r\n U_Name = Label(root2, text=\"User Name: \",font=(\"Helvetica\", 16))\r\n U_Name.place(x=140, y=280)\r\n Mail = Label(root2, text=\"Enter Mail:\",font=(\"Helvetica\", 16))\r\n Mail.place(x=140, y=320)\r\n Pin= Label(root2, text=\"Enter Pin:\",font=(\"Helvetica\", 16))\r\n Pin.place(x=140, y=360)\r\n\r\n\r\n name = StringVar()\r\n name_ent = Entry(root2, width=30, textvariable=name).place(x=320 , y=240)\r\n u_name= StringVar()\r\n U_name = Entry(root2, width=30, textvariable=u_name).place(x=320 , y=280)\r\n email= StringVar()\r\n passwd_ent = Entry(root2, width=30, textvariable=email).place(x=320 , y=320)\r\n pin = StringVar()\r\n Pin_g = Entry(root2,show=\"*\", width=30, textvariable=pin).place(x=320 , y=360)\r\n button=Button(root2,text=\"Submit\",command=lambda: Clarification(root2,name,u_name,email,pin),anchor=CENTER,bg=\"Grey\",fg=\"yellow\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n button.place(x=270,y=440)\r\n root2.mainloop()\r\n\r\n\r\n\r\ndef Clarification(p,name,u_name,email,pin):\r\n \r\n \r\n name=name.get()\r\n u_name=u_name.get()\r\n email=email.get()\r\n pin=pin.get()\r\n\r\n if(name==u_name):\r\n messagebox.showerror(\"Wrong Input\",\"Name and User-Name cannot be same\")\r\n form(p)\r\n elif(list(set(email)).count(\"@\")!=1 or list(set(email)).count(\".\")<1):\r\n messagebox.showerror(\"Wrong Input\",\"E-Mail FieldError\")\r\n form(p)\r\n elif(len(pin)!=4):\r\n messagebox.showerror(\"Wrong Input\",\"PIN should be four Digits\")\r\n form(p)\r\n i=0\r\n while(i<4):\r\n if(pin[i]==\"1\" or pin[i]==\"2\" or pin[i]==\"3\" or pin[i]==\"4\" or pin[i]==\"5\" or pin[i]==\"6\" or pin[i]==\"7\" or pin[i]==\"8\" or pin[i]==\"9\" or pin[i]==\"0\"):\r\n i+=1\r\n else:\r\n messagebox.showerror(\"Wrong Input\",\"PIN should contain Digits only\")\r\n form(p)\r\n \r\n \r\n c.execute('select max(id) from accounts')\r\n lst=c.fetchone()\r\n lst=list(lst)\r\n acc=int(lst[0])+1\r\n pin=int(pin)\r\n p.destroy()\r\n root3=Tk()\r\n filename = PhotoImage(file = \"img5.png\")\r\n background_label = Label(root3, image=filename)\r\n background_label.place(x=0, y=0, relwidth=1, relheight=1)\r\n root3.lift()\r\n root3.attributes('-topmost',True)\r\n \r\n root3[\"bg\"]=\"white\"\r\n root3.minsize(470,500)\r\n root3.maxsize(470,500)\r\n tex=Label(root3,text=\"Confirm Details\",bg=\"white\",fg=\"black\",font=(\"Helvetica\",45))\r\n tex.place(x=30,y=40)\r\n labelact=Label(root3,text=\"Acc No:\",bg=\"white\",fg=\"black\",font=(\"Helvetica\", 16))\r\n labelact.place(x=60,y=150)\r\n label=Label(root3,text=\"Name:\",bg=\"white\",fg=\"black\",font=(\"Helvetica\", 16))\r\n label.place(x=60,y=200)\r\n label2=Label(root3,text=\"User-Name:\",bg=\"white\",fg=\"black\",font=(\"Helvetica\", 16))\r\n label2.place(x=60,y=250)\r\n label3=Label(root3,text=\"Email:\",bg=\"white\",fg=\"black\",font=(\"Helvetica\", 16))\r\n label3.place(x=60,y=300)\r\n label3=Label(root3,text=\"Pin:\",bg=\"white\",fg=\"black\",font=(\"Helvetica\", 16))\r\n label3.place(x=60,y=350)\r\n #Entering Values...\r\n labelac=Label(root3,text=acc,bg=\"white\",fg=\"black\",font=(\"Helvetica\", 16))\r\n labelac.place(x=230,y=150)\r\n labela=Label(root3,text=name,bg=\"white\",fg=\"black\",font=(\"Helvetica\", 16))\r\n labela.place(x=230,y=200)\r\n label2a=Label(root3,text=u_name,bg=\"white\",fg=\"black\",font=(\"Helvetica\", 16))\r\n label2a.place(x=230,y=250)\r\n label3a=Label(root3,text=email,bg=\"white\",fg=\"black\",font=(\"Helvetica\", 16))\r\n label3a.place(x=230,y=300)\r\n label3a=Label(root3,text=pin,bg=\"white\",fg=\"black\",font=(\"Helvetica\", 16))\r\n label3a.place(x=230,y=350)\r\n button=Button(root3,text=\"Confirm\",command=lambda: conf(root3,name,u_name,email,pin,acc),anchor=CENTER,bg=\"Grey\",fg=\"yellow\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n button.place(x=180,y=400)\r\n \r\n root3.mainloop()\r\n \r\n \r\ndef conf(p,name,u_name,email,pin,acc):\r\n bal=0\r\n c.execute(\"\"\"insert into accounts values (?,?,?,?,?,?)\"\"\",(acc,u_name,pin,name,email,bal))\r\n conn.commit()\r\n login(p)\r\n\r\n\r\n \r\ndef home(p,l):\r\n \r\n p.destroy()\r\n root4=Tk()\r\n filename = PhotoImage(file = \"img11.png\")\r\n background_label = Label(root4, image=filename)\r\n background_label.place(x=0, y=0, relwidth=1, relheight=1)\r\n root4[\"bg\"]=\"black\"\r\n root4.minsize(800,600)\r\n root4.maxsize(800,600)\r\n label=Label(root4,text=\"Welcome \"+str.upper(l[3]),bg=\"yellow\",fg=\"red\",font=(\"Helvetica\", 40))\r\n label.place(x=180,y=80)\r\n label2=Label(root4,text=\"Account Number:\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n label2.place(x=100,y=150)\r\n label3=Label(root4,text=\"Current balance:\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n label3.place(x=100,y=190)\r\n #Entering Values...\r\n labela=Label(root4,text=l[0],bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n labela.place(x=300,y=150)\r\n label2a=Label(root4,text=l[5],bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n label2a.place(x=300,y=190)\r\n button=Button(root4,text=\"Deposit Money\",command=lambda: deposit(root4,l),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n button.place(x=30,y=300)\r\n button=Button(root4,text=\"Withdraw\",command=lambda: withdraw(root4,l),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n button.place(x=220,y=300)\r\n button=Button(root4,text=\"Send Money\",command=lambda: send(root4,l),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n button.place(x=360,y=300)\r\n button=Button(root4,text=\"Account Management\",command=lambda: management(root4,l),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n button.place(x=525,y=300)\r\n button=Button(root4,text=\"Logout\",command=lambda: main(root4),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n button.place(x=400,y=450)\r\n root4.mainloop()\r\n\r\ndef deposit(p,l):\r\n p.destroy()\r\n root5=Tk()\r\n filename = PhotoImage(file = \"dep.png\")\r\n background_label = Label(root5, image=filename)\r\n background_label.place(x=0, y=0, relwidth=1, relheight=1)\r\n root5[\"bg\"]=\"black\"\r\n root5.minsize(600,500)\r\n root5.maxsize(600,500)\r\n labeld=Label(root5,text=\"Deposit\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 40))\r\n labeld.place(x=180,y=50)\r\n label=Label(root5,text=\"Current Balance:\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n label.place(x=60,y=150)\r\n label1=Label(root5,text=\"Add Amount:\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n label1.place(x=60,y=200)\r\n label2=Label(root5,text=l[5],bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n label2.place(x=230,y=150)\r\n am=IntVar()\r\n e=Entry(root5,textvariable=am)\r\n e.place(x=230,y=205)\r\n button=Button(root5,text=\"ADD\",command=lambda: ad(root5,am,l),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n button.place(x=250,y=300)\r\n button=Button(root5,text=\"Home\",command=lambda: home(root5,l),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n button.place(x=140,y=400)\r\n button=Button(root5,text=\"Logout\",command=lambda: main(root5),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n button.place(x=350,y=400)\r\n root5.mainloop()\r\n\r\ndef ad(p,bal,l):\r\n l=list(l)\r\n bal=int(bal.get())\r\n bal=l[5]+bal\r\n l[5]=bal\r\n c.execute('update accounts set bal=\"%i\" where id=\"%i\"' %(bal,l[0]))\r\n conn.commit()\r\n messagebox.showinfo(\"Deposit Amount\",\"Money Successfully Added\")\r\n deposit(p,l)\r\n\r\n\r\ndef withdraw(p,l):\r\n p.destroy()\r\n root6=Tk()\r\n filename = PhotoImage(file = \"wit.png\")\r\n background_label = Label(root6, image=filename)\r\n background_label.place(x=0, y=0, relwidth=1, relheight=1)\r\n root6[\"bg\"]=\"black\"\r\n root6.minsize(600,500)\r\n root6.maxsize(600,500)\r\n labeld=Label(root6,text=\"Withdraw\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 40))\r\n labeld.place(x=180,y=50)\r\n label=Label(root6,text=\"Current Balance:\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n label.place(x=60,y=150)\r\n label1=Label(root6,text=\"Withdraw Amt\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n label1.place(x=60,y=200)\r\n label2=Label(root6,text=l[5],bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n label2.place(x=250,y=150)\r\n am=IntVar()\r\n e=Entry(root6,textvariable=am)\r\n e.place(x=250,y=205)\r\n button=Button(root6,text=\"Withdraw\",command=lambda: dec(root6,am,l),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n button.place(x=250,y=300)\r\n button=Button(root6,text=\"Home\",command=lambda: home(root6,l),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n button.place(x=140,y=400)\r\n button=Button(root6,text=\"Logout\",command=lambda: main(root6),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n button.place(x=350,y=400)\r\n root6.mainloop()\r\n\r\ndef dec(p,bal,l):\r\n l=list(l)\r\n bal=int(bal.get())\r\n if(bal>l[5]):\r\n messagebox.showerror(\"WithDraw Amount\",\"Insufficient Balance in Account\")\r\n withdraw(p,l)\r\n bal=l[5]-bal\r\n l[5]=bal\r\n c.execute('update accounts set bal=\"%i\" where id=\"%i\"' %(bal,l[0]))\r\n conn.commit()\r\n messagebox.showinfo(\"Withdraw Amount\",\"Money Successfully Withdrawn\")\r\n withdraw(p,l)\r\n\r\n\r\ndef send(p,l):\r\n p.destroy()\r\n root7=Tk()\r\n filename = PhotoImage(file = \"tran.png\")\r\n background_label = Label(root7, image=filename)\r\n background_label.place(x=0, y=0, relwidth=1, relheight=1)\r\n root7[\"bg\"]=\"black\"\r\n root7.minsize(600,500)\r\n root7.maxsize(600,500)\r\n labeld=Label(root7,text=\"Money Transfer\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 40))\r\n labeld.place(x=130,y=50)\r\n label=Label(root7,text=\"Current Balance:\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n label.place(x=60,y=150)\r\n label1=Label(root7,text=\"Receiver Account-Number:\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n label1.place(x=60,y=200)\r\n label2=Label(root7,text=\"Amount to transfer:\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n label2.place(x=60,y=240)\r\n label2=Label(root7,text=l[5],bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n label2.place(x=330,y=150)\r\n ac=IntVar()\r\n b=IntVar()\r\n e=Entry(root7,textvariable=ac)\r\n e.place(x=330,y=205)\r\n e2=Entry(root7,textvariable=b)\r\n e2.place(x=330,y=245)\r\n button=Button(root7,text=\"Send\",command=lambda: tra(root7,l,ac,b),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n button.place(x=250,y=300)\r\n button=Button(root7,text=\"Home\",command=lambda: home(root7,l),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n button.place(x=140,y=400)\r\n button=Button(root7,text=\"Logout\",command=lambda: main(root7),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n button.place(x=350,y=400)\r\n root7.mainloop()\r\n\r\ndef tra(p,l,ac,b):\r\n b=int(b.get())\r\n ac=int(ac.get())\r\n l=list(l)\r\n if(b>l[5]):\r\n messagebox.showerror(\"Transfer Amount\",\"Insufficient Balance in Account\")\r\n send(p,l)\r\n \r\n c.execute('select * from accounts where id=\"%i\"' %(ac))\r\n lst=c.fetchone()\r\n if(lst==None):\r\n messagebox.showerror(\"Invalid Account\",\"Reciever Account not Found\")\r\n send(p,l)\r\n b=l[5]-b\r\n l[5]=b \r\n lst=list(lst)\r\n lst[5]=lst[5]+b\r\n c.execute('update accounts set bal=\"%i\" where id=\"%i\"' %(b,l[0]))\r\n conn.commit()\r\n c.execute('update accounts set bal=\"%i\" where id=\"%i\"' %(lst[5],ac))\r\n conn.commit()\r\n messagebox.showinfo(\"Transfer Amount\",\"Money Successfully Transferred\")\r\n withdraw(p,l)\r\n\r\n\r\ndef management(p,l):\r\n p.destroy()\r\n root8=Tk()\r\n filename = PhotoImage(file = \"act.png\")\r\n background_label = Label(root8, image=filename)\r\n background_label.place(x=0, y=0, relwidth=1, relheight=1)\r\n root8[\"bg\"]=\"black\"\r\n root8.minsize(600,600)\r\n root8.maxsize(600,600)\r\n labeld=Label(root8,text=\"Account Managment\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 36))\r\n labeld.place(x=100,y=50)\r\n label=Label(root8,text=\"Change login Credentials\",bg=\"black\",fg=\"white\",font=(\"Helvetica\",20))\r\n label.place(x=140,y=150)\r\n button=Button(root8,text=\"Pin Change\",command=lambda: pinn(root8,l),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n button.place(x=70,y=200)\r\n button1=Button(root8,text=\"User-Name Change\",command=lambda: un(root8,l),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n button1.place(x=300,y=200)\r\n label1=Label(root8,text=\"Change Personal Details\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 20))\r\n label1.place(x=140,y=280)\r\n button2=Button(root8,text=\"Email Change\",command=lambda: em(root8,l),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n button2.place(x=70,y=350)\r\n button3=Button(root8,text=\"Account-Name Change\",command=lambda: na(root8,l),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n button3.place(x=300,y=350)\r\n buttona=Button(root8,text=\"Home\",command=lambda: home(root8,l),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n buttona.place(x=140,y=520)\r\n buttonb=Button(root8,text=\"Logout\",command=lambda: main(root8),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n buttonb.place(x=350,y=520)\r\n root8.mainloop()\r\n\r\ndef un(p,l):\r\n p.destroy()\r\n root9=Tk()\r\n filename = PhotoImage(file = \"set.png\")\r\n background_label = Label(root9, image=filename)\r\n background_label.place(x=0, y=0, relwidth=1, relheight=1)\r\n root9[\"bg\"]=\"black\"\r\n root9.minsize(600,600)\r\n root9.maxsize(600,600)\r\n labeld=Label(root9,text=\"Account Managment\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 36))\r\n labeld.place(x=100,y=50)\r\n labela=Label(root9,text=\"Changing User-Name\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 20))\r\n labela.place(x=140,y=130)\r\n label=Label(root9,text=\"Old User-Name:\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n label.place(x=60,y=180)\r\n labelaa=Label(root9,text=l[1],bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n labelaa.place(x=240,y=180)\r\n labele=Label(root9,text=\"New User-Name:\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n labele.place(x=60,y=230)\r\n ch=StringVar()\r\n e=Entry(root9,textvariable=ch)\r\n e.place(x=240,y=235)\r\n buttonc=Button(root9,text=\"Change\",command=lambda: alt1(root9,l,ch),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n buttonc.place(x=200,y=280)\r\n buttona=Button(root9,text=\"Home\",command=lambda: home(root9,l),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n buttona.place(x=140,y=400)\r\n buttonb=Button(root9,text=\"Logout\",command=lambda: main(root9),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n buttonb.place(x=350,y=400)\r\n root9.mainloop()\r\n\r\ndef alt1(p,l,ch):\r\n ch=str(ch.get())\r\n l=list(l)\r\n l[1]=ch\r\n c.execute('update accounts set uname=\"%s\" where id=\"%i\"' %(ch,l[0]))\r\n conn.commit()\r\n messagebox.showinfo(\"Change Details\",\"Changed User-Name Successfully\")\r\n management(p,l)\r\n\r\n\r\ndef pinn(p,l):\r\n p.destroy()\r\n root9=Tk()\r\n filename = PhotoImage(file = \"set.png\")\r\n background_label = Label(root9, image=filename)\r\n background_label.place(x=0, y=0, relwidth=1, relheight=1)\r\n root9[\"bg\"]=\"black\"\r\n root9.minsize(600,600)\r\n root9.maxsize(600,600)\r\n labeld=Label(root9,text=\"Account Managment\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 36))\r\n labeld.place(x=100,y=50)\r\n labela=Label(root9,text=\"Changing PIN\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 20))\r\n labela.place(x=140,y=130)\r\n label=Label(root9,text=\"Old PIN:\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n label.place(x=60,y=180)\r\n labelaa=Label(root9,text=l[2],bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n labelaa.place(x=240,y=180)\r\n labele=Label(root9,text=\"New PIN:\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n labele.place(x=60,y=230)\r\n ch=IntVar()\r\n e=Entry(root9,textvariable=ch)\r\n e.place(x=240,y=235)\r\n buttonc=Button(root9,text=\"Change\",command=lambda: alt2(root9,l,ch),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n buttonc.place(x=200,y=280)\r\n buttona=Button(root9,text=\"Home\",command=lambda: home(root9,l),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n buttona.place(x=140,y=400)\r\n buttonb=Button(root9,text=\"Logout\",command=lambda: main(root9),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n buttonb.place(x=350,y=400)\r\n root9.mainloop()\r\n\r\ndef alt2(p,l,ch):\r\n ch=int(ch.get())\r\n l=list(l)\r\n l[2]=ch\r\n c.execute('update accounts set pi=\"%i\" where id=\"%i\"' %(ch,l[0]))\r\n conn.commit()\r\n messagebox.showinfo(\"Change Details\",\"Changed PIN Successfully\")\r\n management(p,l)\r\n\r\ndef na(p,l):\r\n p.destroy()\r\n root9=Tk()\r\n filename = PhotoImage(file = \"set.png\")\r\n background_label = Label(root9, image=filename)\r\n background_label.place(x=0, y=0, relwidth=1, relheight=1)\r\n root9[\"bg\"]=\"black\"\r\n root9.minsize(600,600)\r\n root9.maxsize(600,600)\r\n labeld=Label(root9,text=\"Account Managment\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 36))\r\n labeld.place(x=100,y=50)\r\n labela=Label(root9,text=\"Changing Acct Holder Name\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 20))\r\n labela.place(x=140,y=130)\r\n label=Label(root9,text=\"Old Name:\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n label.place(x=60,y=180)\r\n labelaa=Label(root9,text=l[3],bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n labelaa.place(x=240,y=180)\r\n labele=Label(root9,text=\"New Name:\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n labele.place(x=60,y=230)\r\n ch=StringVar()\r\n e=Entry(root9,textvariable=ch)\r\n e.place(x=240,y=235)\r\n buttonc=Button(root9,text=\"Change\",command=lambda: alt3(root9,l,ch),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n buttonc.place(x=200,y=280)\r\n buttona=Button(root9,text=\"Home\",command=lambda: home(root9,l),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n buttona.place(x=140,y=400)\r\n buttonb=Button(root9,text=\"Logout\",command=lambda: main(root9),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n buttonb.place(x=350,y=400)\r\n root9.mainloop()\r\n\r\ndef alt3(p,l,ch):\r\n ch=str(ch.get())\r\n l=list(l)\r\n l[3]=ch\r\n c.execute('update accounts set name=\"%s\" where id=\"%i\"' %(ch,l[0]))\r\n conn.commit()\r\n messagebox.showinfo(\"Change Details\",\"Changed Account Holder Name Successfully\")\r\n management(p,l)\r\n\r\ndef em(p,l):\r\n p.destroy()\r\n root9=Tk()\r\n filename = PhotoImage(file = \"set.png\")\r\n background_label = Label(root9, image=filename)\r\n background_label.place(x=0, y=0, relwidth=1, relheight=1)\r\n root9[\"bg\"]=\"black\"\r\n root9.minsize(600,600)\r\n root9.maxsize(600,600)\r\n labeld=Label(root9,text=\"Account Managment\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 36))\r\n labeld.place(x=100,y=50)\r\n labela=Label(root9,text=\"Changing E-mail Address\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 20))\r\n labela.place(x=140,y=130)\r\n label=Label(root9,text=\"Old E-mail:\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n label.place(x=60,y=180)\r\n labelaa=Label(root9,text=l[4],bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n labelaa.place(x=240,y=180)\r\n labele=Label(root9,text=\"New E-mail:\",bg=\"black\",fg=\"white\",font=(\"Helvetica\", 16))\r\n labele.place(x=60,y=230)\r\n ch=StringVar()\r\n e=Entry(root9,textvariable=ch)\r\n e.place(x=240,y=235)\r\n buttonc=Button(root9,text=\"Change\",command=lambda: alt4(root9,l,ch),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n buttonc.place(x=200,y=280)\r\n buttona=Button(root9,text=\"Home\",command=lambda: home(root9,l),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n buttona.place(x=140,y=400)\r\n buttonb=Button(root9,text=\"Logout\",command=lambda: main(root9),anchor=CENTER,bg=\"yellow\",fg=\"red\",padx=15,pady=10,font=(\"Helvetica\", 16),relief=\"groove\")\r\n buttonb.place(x=350,y=400)\r\n root9.mainloop()\r\n\r\ndef alt4(p,l,ch):\r\n ch=str(ch.get())\r\n l=list(l)\r\n l[4]=ch\r\n c.execute('update accounts set email=\"%s\" where id=\"%i\"' %(ch,l[0]))\r\n conn.commit()\r\n messagebox.showinfo(\"Change Details\",\"Changed E-mail Address Successfully\")\r\n management(p,l)\r\n\r\nc = conn.cursor()\r\nc.execute(\"\"\" CREATE TABLE IF NOT EXISTS accounts (\r\n id integer PRIMARY KEY,\r\n uname text NOT NULL,\r\n pi integer,\r\n name text,\r\n email text); \"\"\")\r\n\r\n\r\nconn.commit()\r\n\r\ntest=Tk()\r\nmain(test)\r\n\r\n\r\n\r\n","sub_path":"pro.py","file_name":"pro.py","file_ext":"py","file_size_in_byte":23955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"531128558","text":"import unittest\nimport datetime\n\nfrom src import app, db\nfrom src.models import Page, Matches\n\n#################################################\n# Setup #\n#################################################\n\n\ndef CreatePage(url1: str) -> Page:\n \"\"\"Create a Page in the Database\"\"\"\n page = Page(\n name=url1,\n queried=1,\n )\n db.session.add(page)\n db.session.commit()\n return page\n\n\ndef CreateMatch() -> None:\n \"\"\"Creates two test Matches in the Database\"\"\"\n m1 = Matches(\n name=\"TESTURL1 => TESTURL2\",\n url1=CreatePage(\"TESTURL1\"),\n url2=CreatePage(\"TESTURL2\"),\n degrees=33,\n last=datetime.datetime.now(),\n )\n\n m2 = Matches(\n name=\"TESTURL3 => TESTURL4\",\n url1=CreatePage(\"TESTURL3\"),\n url2=CreatePage(\"TESTURL4\"),\n degrees=33,\n last=datetime.datetime.now(),\n )\n\n db.session.add(m1, m2)\n db.session.commit()\n\n\n#################################################\n# Tests #\n#################################################\n\n\nclass MainTests(unittest.TestCase):\n \"Tests for the routes conatined in Main.py\"\n\n def setUp(self):\n \"\"\"Executed prior to each test.\"\"\"\n app.config[\"TESTING\"] = True\n app.config[\"WTF_CSRF_ENABLED\"] = False\n app.config[\"DEBUG\"] = False\n app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///:memory:\"\n self.app = app.test_client()\n db.drop_all()\n db.create_all()\n\n def test_landing_page(self):\n \"\"\"TESTS Landing page appears on base route\"\"\"\n response = self.app.get(\"/\", follow_redirects=True)\n self.assertEqual(response.status_code, 200)\n\n res_txt = response.get_data(as_text=True)\n\n self.assertIn(\"input\", res_txt)\n self.assertIn(\"button\", res_txt)\n self.assertIn(\"Welcome to\", res_txt)\n\n def test_recent_querys(self):\n \"\"\"TESTS Landing page shows recent queries\"\"\"\n CreateMatch()\n\n response = self.app.get(\"/\", follow_redirects=True)\n self.assertEqual(response.status_code, 200)\n\n res_txt = response.get_data(as_text=True)\n self.assertIn(\"TESTURL1\", res_txt)\n self.assertIn(\"TESTURL2\", res_txt)\n\n def test_query_new(self):\n \"\"\"TESTS new query shows proper response\"\"\"\n data = {\n \"term1\": \"adolf_hitler\",\n \"term2\": \"dictator\"\n }\n response = self.app.post(\n \"/degree\", data=data, follow_redirects=True\n )\n self.assertEqual(response.status_code, 200)\n\n res_txt = response.get_data(as_text=True)\n\n self.assertIn(\"1 degrees\", res_txt)\n\n def test_query_cached(self):\n \"\"\"TESTS query that is stored in database\"\"\"\n CreateMatch()\n\n data = {\n \"term1\": \"TESTURL1\",\n \"term2\": \"TESTURL2\"\n }\n response = self.app.post(\n \"/degree\", data=data, follow_redirects=True\n )\n self.assertEqual(response.status_code, 200)\n\n res_txt = response.get_data(as_text=True)\n\n self.assertIn(\"33 degrees\", res_txt)\n self.assertIn(\"YAAAAY!\", res_txt)\n","sub_path":"src/main/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"78992398","text":"import os\nimport string\nimport nltk\nfrom nltk.corpus import gutenberg\nimport pandas as pd\n\nfrom gensim import corpora, models\nfrom collections import defaultdict\n\nfrom urllib import request\n\n\ndef tokenize(doc):\n\n stopwords = nltk.corpus.stopwords.words('english')\n tokens = doc.split()\n tokens = [t for t in tokens if t.lower() not in stopwords]\n tokens = [t.lower() for t in tokens if t.isalpha()]\n\n return tokens\n\n\nif not os.path.exists('shakespeare-complete-raw.txt'):\n print('downloading shakespeare text...')\n url = 'https://ocw.mit.edu/ans7870/6/6.006/s08/lecturenotes/files/t8.shakespeare.txt'\n response = request.urlopen(url)\n raw = response.read().decode('utf8')\n with open('shakespeare-complete-raw.txt', 'w') as o:\n o.write(raw)\n\nprint('processing text...')\ntext = open('shakespeare-complete-raw.txt').read()\nsonnets_complete = text[text.find('THE SONNETS'):text.find('THE END')]\nsonnets = sonnets_complete.split('\\n\\n\\n')\nsonnets = [tokenize(s) for s in sonnets]\nsonnets = sonnets[1:-1] # remove the first and last\n\ndictionary = corpora.Dictionary(sonnets)\n# dictionary.filter_extremes(no_above=0.25)\ndictionary.filter_n_most_frequent(100)\n\ncorpus = [dictionary.doc2bow(sonnet) for sonnet in sonnets]\n\nprint('training model...')\nmodel = models.LdaModel(corpus, id2word=dictionary, num_topics=20)\nprint(model.print_topics())\n","sub_path":"words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"440050470","text":"# -*- coding: UTF-8 -*-\n# Copyright 2014-2015 Luc Saffre\n# License: BSD (see file COPYING for details)\n\n\"\"\"Adds functionality for managing households (i.e. groups of humans\nwho live together in a same house).\n\n.. autosummary::\n :toctree:\n\n models\n choicelists\n fixtures.std\n fixtures.demo\n\nThis plugin is being extended by :ref:`welfare` in\n:mod:`lino_welfare.modlib.households`.\n\n\"\"\"\n\nfrom lino.api import ad, _\n\n\nclass Plugin(ad.Plugin):\n \"See :doc:`/dev/plugins`.\"\n verbose_name = _(\"Households\")\n person_model = \"contacts.Person\"\n\n def setup_main_menu(config, site, profile, m):\n mnugrp = site.plugins.contacts\n m = m.add_menu(mnugrp.app_label, mnugrp.verbose_name)\n m.add_action('households.Households')\n\n def setup_config_menu(config, site, profile, m):\n mnugrp = site.plugins.contacts\n m = m.add_menu(mnugrp.app_label, mnugrp.verbose_name)\n # m.add_action(Roles)\n m.add_action('households.Types')\n\n def setup_explorer_menu(config, site, profile, m):\n mnugrp = site.plugins.contacts\n m = m.add_menu(mnugrp.app_label, mnugrp.verbose_name)\n m.add_action('households.MemberRoles')\n m.add_action('households.Members')\n","sub_path":"lino/modlib/households/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"399885982","text":"import json\nfrom django.contrib.auth.models import User, Permission\nfrom django.core.exceptions import ValidationError\n\nfrom django.test import TestCase\nfrom django.urls import reverse\nfrom http import HTTPStatus\n\nfrom . import models\nfrom .forms import TenantForm, RoomForm, JournalForm\nfrom .settings import FIXTURES, CHECK_OUT_URL\n\n\nclass ViewTests(TestCase):\n\n def test_health_check(self):\n response = self.client.get(reverse('health_check'))\n self.assertEqual(response.status_code, HTTPStatus.OK)\n\n def test_index_view(self):\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, HTTPStatus.OK)\n\n def test_created_view(self):\n response = self.client.get(reverse('created'))\n self.assertEqual(response.status_code, HTTPStatus.OK)\n\n\nclass ConciergeViewTests(TestCase):\n fixtures = FIXTURES\n\n def setUp(self):\n # Создание пользователя\n test_user = User.objects.create_user(username='testuser1', password='12345')\n test_user.save()\n permission1 = Permission.objects.get(codename='view_tenant')\n permission2 = Permission.objects.get(codename='view_room')\n permission3 = Permission.objects.get(codename='view_journal')\n test_user.user_permissions.add(permission1)\n test_user.user_permissions.add(permission2)\n test_user.user_permissions.add(permission3)\n test_user.save()\n login = self.client.login(username='testuser1', password='12345')\n\n def test_tenant_detail_view(self):\n response = self.client.get(reverse('tenant_detail', kwargs={'pk': 11}))\n self.assertEqual(response.status_code, HTTPStatus.OK)\n\n def test_room_detail_view(self):\n response = self.client.get(reverse('room_detail', kwargs={'pk': 11}))\n self.assertEqual(response.status_code, HTTPStatus.OK)\n\n def test_journal_detail_view(self):\n response = self.client.get(reverse('journal_detail', kwargs={\"pk\": 5}))\n self.assertEqual(response.status_code, HTTPStatus.OK)\n\n def test_tenant_list_view(self):\n response = self.client.get(reverse('tenant_list'))\n self.assertEqual(response.status_code, 200)\n self.assertTrue('is_paginated' in response.context)\n\n def test_room_list_view(self):\n response = self.client.get(reverse('room_list'))\n self.assertEqual(response.status_code, 200)\n self.assertTrue('is_paginated' in response.context)\n\n def test_journal_list_view(self):\n response = self.client.get(reverse('journal_list'))\n self.assertEqual(response.status_code, 200)\n self.assertTrue('is_paginated' in response.context)\n\n\nclass ApiTest(TestCase):\n fixtures = FIXTURES\n\n def test_api_tenant(self):\n test_tenant = [{\"model\": \"mycore.tenant\",\n \"pk\": 11,\n \"fields\": {\n \"first_name\": \"John\",\n \"last_name\": \"Lennon\",\n \"date_of_birth\": \"1990-11-20\",\n \"phone\": \"123456789\",\n 'photo': '',\n 'notes': None}\n }]\n response = self.client.get(reverse('tenant_api', kwargs={'object_id': 11}))\n self.assertEqual(response.status_code, HTTPStatus.OK)\n self.assertEqual(json.loads(response.content.decode(\"utf-8\")), test_tenant)\n\n def test_api_room(self):\n test_room = [{\"model\": \"mycore.room\",\n \"pk\": 13,\n \"fields\": {\n \"number\": 333,\n \"max_guests\": 6,\n \"owner\": None,\n \"is_free\": True}\n }]\n response = self.client.get(reverse('room_api', kwargs={'object_id': 13}))\n self.assertEqual(response.status_code, HTTPStatus.OK)\n self.assertEqual(json.loads(response.content.decode(\"utf-8\")), test_room)\n\n def test_api_journal(self):\n test_journal = [{\"model\": \"mycore.journal\",\n \"pk\": 5,\n \"fields\": {\n \"room_id\": 12,\n \"guests_cnt\": 1,\n \"key_in_date\": \"2020-01-05T17:57:23Z\",\n \"key_out_date\": \"2020-01-05T18:05:09Z\",\n \"tenant_id\": 13,\n \"notes\": \"\"\n }\n }]\n response = self.client.get(reverse('journal_api', kwargs={'object_id': 5}))\n self.assertEqual(response.status_code, HTTPStatus.OK)\n self.assertEqual(json.loads(response.content.decode(\"utf-8\")), test_journal)\n\n\nclass FormTests(TestCase):\n\n def test_tenant_form_valid(self):\n form = TenantForm(data={'first_name': 'AAA', 'last_name': 'BBB',\n 'date_of_birth': '2000-05-05', 'phone': '1234567'})\n self.assertTrue(form.is_valid())\n\n def test_tenant_form_invalid(self):\n form = TenantForm(data={'first_name': None, 'last_name': None,\n 'date_of_birth': '2000-05-05', 'phone': '1234567'})\n self.assertFalse(form.is_valid())\n\n def test_room_form_valid(self):\n form = RoomForm(data={'number': 15, 'max_guests': 2})\n self.assertTrue(form.is_valid())\n\n def _test_room_form_invalid(self):\n form = RoomForm(data={'number': 'qwerty', 'max_guests': '2'})\n self.assertFalse(form.is_valid())\n\n def test_journal_form_valid(self):\n form = JournalForm(data={'room_id': 11, 'tenant_id': 11, 'guests_count': 2,\n 'key_in_date': '2020-01-05', 'key_out_date': None})\n self.assertTrue(form.is_valid())\n\n def test_journal_form_invalid(self):\n form = JournalForm(data={'room_id': 'z', 'tenant_id': '1', 'guests_count': '3',\n 'key_in_date': '2020-01-05', 'key_out_date': None})\n self.assertFalse(form.is_valid())\n\n\nclass FormViewTests(TestCase):\n fixtures = FIXTURES\n\n def setUp(self):\n test_user = User.objects.create_user(username='testuser', password='12345')\n test_user.save()\n permission1 = Permission.objects.get(codename='add_tenant')\n permission2 = Permission.objects.get(codename='add_room')\n permission3 = Permission.objects.get(codename='add_journal')\n test_user.user_permissions.add(permission1)\n test_user.user_permissions.add(permission2)\n test_user.user_permissions.add(permission3)\n test_user.save()\n login = self.client.login(username='testuser', password='12345')\n\n def test_tenant_form_view(self):\n tenant_count = models.Tenant.objects.count()\n response = self.client.post('/tenant/', data={'first_name': 'AAA', 'last_name': 'BBB',\n 'date_of_birth': '2000-05-05', 'phone': '1234567'})\n self.assertEqual(response.status_code, 302)\n self.assertEqual(models.Tenant.objects.count(), tenant_count + 1)\n\n def test_room_form_view(self):\n room_count = models.Room.objects.count()\n response = self.client.post('/room/', data={'number': '55', 'max_guests': '2'})\n self.assertEqual(response.status_code, 302)\n self.assertEqual(models.Room.objects.count(), room_count + 1)\n\n def test_journal_form_view(self):\n journal_count = models.Journal.objects.count()\n response = self.client.post('/journal/', data={'room_id': '11', 'guests_count': '2',\n 'key_in_date': '2000-05-05', 'tenant_id': '11'})\n self.assertEqual(response.status_code, 302)\n self.assertEqual(models.Journal.objects.count(), journal_count + 1)\n\n\nclass GetKeyTest(TestCase):\n fixtures = FIXTURES\n\n def setUp(self):\n test_user = User.objects.create_user(username='testuser', password='12345')\n test_user.save()\n permission1 = Permission.objects.get(codename='add_journal')\n permission2 = Permission.objects.get(codename='change_journal')\n permission3 = Permission.objects.get(codename='change_room')\n test_user.user_permissions.add(permission1, permission2, permission3)\n login = self.client.login(username='testuser', password='12345')\n\n def test_get_room(self):\n free_room_count1 = models.Room.objects.filter(is_free=True).count()\n self.client.post('/journal/', data={'room_id': '12', 'guests_count': '2',\n 'key_in_date': '2000-05-05', 'tenant_id': '11'})\n\n free_room_count2 = models.Room.objects.filter(is_free=True).count()\n self.assertEqual(free_room_count1 - 1, free_room_count2)\n\n def test_get_room_free(self):\n free_room_count1 = models.Room.objects.filter(is_free=True).count()\n self.client.post('/journal/', data={'room_id': '15', 'guests_count': '2',\n 'key_in_date': '2019-07-05', 'tenant_id': '12'})\n\n free_room_count2 = models.Room.objects.filter(is_free=True).count()\n journal = models.Journal.objects.filter().latest('id')\n self.client.post(f'{CHECK_OUT_URL}{journal.id}/check_out_form')\n\n free_room_count3 = models.Room.objects.filter(is_free=True).count()\n self.assertEqual(free_room_count1 - 1, free_room_count2)\n self.assertEqual(free_room_count1, free_room_count3)\n\n def test_invalid_get_room(self):\n free_room_count1 = models.Room.objects.filter(is_free=True).count()\n self.client.post('/journal/', data={'room_id': '14', 'guests_count': '2',\n 'key_in_date': '2019-07-05', 'tenant_id': '12'})\n free_room_count2 = models.Room.objects.filter(is_free=True).count()\n with self.assertRaises(ValidationError):\n self.client.post('/journal/', data={'room_id': '14', 'guests_count': '2',\n 'key_in_date': '2019-07-05', 'tenant_id': '12'})\n free_room_count3 = models.Room.objects.filter(is_free=True).count()\n self.assertEqual(free_room_count1 - 1, free_room_count2)\n self.assertEqual(free_room_count2, free_room_count3)\n","sub_path":"app/concierge/mycore/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":10312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"378116979","text":"import os\nimport sys\n\nimport numpy as np\nimport tensorflow as tf\nfrom google.protobuf import text_format\n\nfrom config import Config\nfrom language_model.lm_1b import data_utils\nfrom load_model import LoadModel\n\n\ndef remove_non_ascii_tokens(vocab, vocab_length=-1):\n filtered_vocab = []\n filtered_vocab_ids = []\n\n if vocab_length != -1:\n vsize = vocab_length\n else:\n vsize = vocab.size\n\n for i in range(vsize):\n token = vocab.id_to_word(i)\n try:\n token.encode('ascii')\n filtered_vocab_ids.append(i)\n filtered_vocab.append(token)\n except UnicodeEncodeError:\n continue\n return filtered_vocab_ids, filtered_vocab\n\n\ndef build_filtered_vocab(vocab, vocab_length=-1):\n\n # set vocab output filename\n if vocab_length != -1:\n filename = Config.filename_vocab_short\n else:\n filename = Config.filename_vocab\n\n # remove non ASCII chars from vocab\n print(\"Building filtered vocab...\")\n filtered_vocab_ids, filtered_vocab = remove_non_ascii_tokens(vocab, vocab_length)\n\n # write vocab to file\n print(\"Writing filtered vocab in {}...\".format(filename))\n with open(filename, \"w\") as f:\n f.write(\"\\n\".join(filtered_vocab))\n print(\"- done. Wrote {} tokens\".format(len(filtered_vocab)))\n\n return filtered_vocab_ids\n\n\ndef dump_lm(vocab, vocab_length=-1, dump_as_txt=False, write_softmax=False, print_emb_status_every=100):\n\n '''\n Save softmax, fitlered vocab and embeddings to file \n\n Args:\n vocab: Contains vocabulary size and converts word to ids\n vocab_length: shorten the filtered vocab (also shortens embs)\n dump_as_txt: save language model embeddings also as txt file\n dum_softmax: save softmax to npy file\n print_emb_status_every: prints a status msg every n token during embeddings loading\n '''\n\n inputs = np.zeros([Config.BATCH_SIZE, Config.NUM_TIMESTEPS], np.int32)\n targets = np.zeros([Config.BATCH_SIZE, Config.NUM_TIMESTEPS], np.int32)\n weights = np.ones([Config.BATCH_SIZE, Config.NUM_TIMESTEPS], np.float32)\n\n sess, t = LoadModel(Config.pbtxt, Config.ckpt)\n\n dim = Config.emb_dim\n\n # dump softmax to file\n if write_softmax:\n dump_softmax(sess, t, weights)\n\n # create filtered vocab\n vocab_ids = build_filtered_vocab(vocab, vocab_length)\n\n # shorten vocab\n if vocab_length != -1:\n vocab_ids = vocab_ids[:vocab_length]\n\n # set output files\n if vocab_length != -1:\n filename_emb_npy = Config.filename_emb_short.format(dim)\n filename_emb_text = Config.filename_emb_text_short.format(dim)\n else:\n filename_emb_npy = Config.filename_emb_full\n filename_emb_text = Config.filename_emb_text_full\n\n # init embeddings tensor\n all_embs = np.zeros([len(vocab_ids), dim])\n\n # collect embeddings tensor for each token in vocab\n print(\"Starting to collect \", len(vocab_ids), \" word embeddings...\")\n for i, word_id in enumerate(vocab_ids):\n input_dict = {t['inputs_in']: inputs,\n t['targets_in']: targets,\n t['target_weights_in']: weights}\n if 'char_inputs_in' in t:\n input_dict[t['char_inputs_in']] = (\n vocab.word_char_ids[word_id].reshape([-1, 1, Config.MAX_WORD_LEN]))\n\n embs = sess.run(t['all_embs'], input_dict)\n all_embs[i, :] = embs\n\n if print_emb_status_every != -1:\n if (i+1) % print_emb_status_every == 0:\n print('Finished word embedding %d/%d - index[%d] %s' % (\n i+1, len(vocab_ids), i, vocab.id_to_word(word_id)))\n\n print(\"Finished all\", len(vocab_ids), \"word embeddings\")\n\n # write embeddings to compressed npy file\n np.save(filename_emb_npy, all_embs)\n print('Embeddings saved to npy file.')\n\n # write embeddings to txt file\n if dump_as_txt:\n np.savetxt(filename_emb_text, all_embs)\n print('Embeddings saved to txt file.')\n\n\ndef dump_softmax(sess, t, weights):\n softmax_weights = sess.run(t['softmax_weights'])\n np.save(Config.filename_softmax, softmax_weights)\n print('Finished writing softmax to npy file.')\n\n# Vocabulary containing character-level information.\nvocab = data_utils.CharsVocabulary(Config.file_lm_vocab, Config.MAX_WORD_LEN)\n\ndump_lm(vocab, \n vocab_length=-1, \n dump_as_txt=True,\n write_softmax=True, \n print_emb_status_every=10000)\n","sub_path":"dump_emb.py","file_name":"dump_emb.py","file_ext":"py","file_size_in_byte":4446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"167379939","text":"import math\nimport lark\n\nimport scope\n\nfrom variable import Variable\nfrom function import Function\nfrom type_check_error import display_type\nfrom type import NGenericType\nfrom enums import EnumType, EnumValue\nfrom native_types import n_list_type, n_map_type, NMap, n_cmd_type, n_maybe_type, maybe_generic, none, yes, n_result_type, result_ok_generic, result_err_generic, ok, err\n\ndef substr(start, end, string):\n\treturn string[start:end]\n\ndef char_at(index, string):\n\tif index < 0 or index >= len(string):\n\t\treturn none\n\telse:\n\t\treturn yes(string[index])\n\ndef item_at(index, lis):\n\tif index < 0 or index >= len(lis):\n\t\treturn none\n\telse:\n\t\treturn yes(lis[index])\n\ndef length(string):\n\ttry:\n\t\treturn len(string)\n\texcept TypeError:\n\t\treturn len(str(string))\n\nasync def filter_map(transformer, lis):\n\tnew_list = []\n\tfor item in lis:\n\t\ttransformed = await transformer.run([item])\n\t\tif transformed.variant == \"yes\":\n\t\t\tnew_list.append(transformed.values[0])\n\treturn new_list\n\ndef type_display(o):\n\tif type(o) == Function:\n\t\treturn str(o)\n\treturn type(o).__name__\n\ndef with_default(default_value, maybe_value):\n\tif maybe_value.variant == \"yes\":\n\t\treturn maybe_value.values[0]\n\telse:\n\t\treturn default_value\n\ndef cmd_then(n_function, cmd):\n\tasync def then(result):\n\t\treturn (await n_function.run([result])).eval\n\treturn cmd.then(then)\n\ndef map_from(entries):\n\t# NMap extends dict so it's basically a dict, but this way we can\n\t# distinguish between a record and a map.\n\treturn NMap(entries)\n\ndef map_get(key, map):\n\titem = map.get(key)\n\tif item is None:\n\t\treturn none\n\telse:\n\t\treturn yes(item)\n\ndef entries(n_map):\n\t# NMap extends dict so it's basically a dict, but this way we can\n\t# distinguish between a record and a map.\n\treturn list(n_map.items())\n\ndef special_print(val):\n\tif isinstance(val, str):\n\t\tprint(val)\n\telse:\n\t\tdisplay, _ = scope.display_value(val, indent=\" \")\n\t\tprint(display)\n\treturn val\n\n# Define global functions/variables\ndef add_funcs(global_scope):\n\tglobal_scope.variables[\"none\"] = Variable(n_maybe_type, none)\n\n\tglobal_scope.add_native_function(\n\t\t\"intInBase10\",\n\t\t[(\"number\", \"int\")],\n\t\t\"str\",\n\t\tstr,\n\t)\n\tglobal_scope.add_native_function(\n\t\t\"round\",\n\t\t[(\"number\", \"float\")],\n\t\t\"int\",\n\t\tround,\n\t)\n\tglobal_scope.add_native_function(\n\t\t\"floor\",\n\t\t[(\"number\", \"float\")],\n\t\t\"int\",\n\t\tmath.floor,\n\t)\n\tglobal_scope.add_native_function(\n\t\t\"ceil\",\n\t\t[(\"number\", \"float\")],\n\t\t\"int\",\n\t\tmath.ceil,\n\t)\n\tglobal_scope.add_native_function(\n\t\t\"charCode\",\n\t\t[(\"character\", \"char\")],\n\t\t\"int\",\n\t\tord,\n\t)\n\tglobal_scope.add_native_function(\n\t\t\"intCode\",\n\t\t[(\"number\", \"int\")],\n\t\t\"char\",\n\t\tchr,\n\t)\n\tglobal_scope.add_native_function(\n\t\t\"charAt\",\n\t\t[(\"location\", \"int\"), (\"string\", \"str\")],\n\t\tn_maybe_type.with_typevars([\"char\"]),\n\t\tchar_at,\n\t)\n\tglobal_scope.add_native_function(\n\t\t\"substring\",\n\t\t[(\"start\", \"int\"), (\"end\", \"int\"), (\"string\", \"str\")],\n\t\t\"str\",\n\t\tsubstr,\n\t)\n\tglobal_scope.add_native_function(\n\t\t\"len\",\n\t\t[(\"obj\", NGenericType(\"t\"))],\n\t\t\"int\",\n\t\tlength,\n\t)\n\tglobal_scope.add_native_function(\n\t\t\"split\",\n\t\t[(\"splitter\", \"char\"), (\"string\", \"str\")],\n\t\tn_list_type.with_typevars([\"str\"]),\n\t\tlambda string, splitter: string.split(splitter)\n\t)\n\tglobal_scope.add_native_function(\n\t\t\"strip\",\n\t\t[(\"string\", \"str\")],\n\t\t\"str\",\n\t\tlambda string: string.strip()\n\t)\n\tglobal_scope.add_native_function(\n\t\t\"range\",\n\t\t[(\"start\", \"int\"), (\"end\", \"int\"), (\"step\", \"int\")],\n\t\tn_list_type.with_typevars([\"int\"]),\n\t\tlambda start, end, step: list(range(start, end, step))\n\t)\n\tglobal_scope.add_native_function(\n\t\t\"type\",\n\t\t[(\"obj\", NGenericType(\"t\"))],\n\t\t\"str\",\n\t\ttype_display,\n\t)\n\tprint_generic = NGenericType(\"t\")\n\tglobal_scope.add_native_function(\n\t\t\"print\",\n\t\t[(\"val\", print_generic)],\n\t\tprint_generic,\n\t\tspecial_print\n\t)\n\titem_at_generic = NGenericType(\"t\")\n\tglobal_scope.add_native_function(\n\t\t\"itemAt\",\n\t\t[(\"index\", \"int\"), (\"list\", n_list_type.with_typevars([item_at_generic]))],\n\t\tn_maybe_type.with_typevars([item_at_generic]),\n\t\titem_at\n\t)\n\tappend_generic = NGenericType(\"t\")\n\tglobal_scope.add_native_function(\n\t\t\"append\",\n\t\t[(\"item\", append_generic), (\"list\", n_list_type.with_typevars([item_at_generic]))],\n\t\tn_list_type.with_typevars([item_at_generic]),\n\t\tlambda item, l: l.__add__([item])\n\t)\n\tfilter_map_generic_a = NGenericType(\"a\")\n\tfilter_map_generic_b = NGenericType(\"b\")\n\tglobal_scope.add_native_function(\n\t\t\"filterMap\",\n\t\t[\n\t\t\t(\"function\", (filter_map_generic_a, n_maybe_type.with_typevars([filter_map_generic_b]))),\n\t\t\t(\"list\", n_list_type.with_typevars([filter_map_generic_a]))\n\t\t],\n\t\tn_list_type.with_typevars([filter_map_generic_b]),\n\t\tfilter_map\n\t)\n\tglobal_scope.add_native_function(\n\t\t\"yes\",\n\t\t[(\"value\", maybe_generic)],\n\t\tn_maybe_type.with_typevars([maybe_generic]),\n\t\tyes,\n\t)\n\tdefault_generic = NGenericType(\"t\")\n\tglobal_scope.add_native_function(\n\t\t\"default\",\n\t\t[(\"default\", default_generic), (\"maybeValue\", n_maybe_type.with_typevars([default_generic]))],\n\t\tdefault_generic,\n\t\twith_default,\n\t)\n\tglobal_scope.add_native_function(\n\t\t\"ok\",\n\t\t[(\"value\", result_ok_generic)],\n\t\tn_result_type.with_typevars([result_ok_generic, result_err_generic]),\n\t\tok,\n\t)\n\tglobal_scope.add_native_function(\n\t\t\"err\",\n\t\t[(\"error\", result_err_generic)],\n\t\tn_result_type.with_typevars([result_ok_generic, result_err_generic]),\n\t\terr,\n\t)\n\tthen_generic_in = NGenericType(\"a\")\n\tthen_generic_out = NGenericType(\"b\")\n\tglobal_scope.add_native_function(\n\t\t\"then\",\n\t\t[(\"thenFunction\", (then_generic_in, n_cmd_type.with_typevars([then_generic_out]))), (\"cmd\", n_cmd_type.with_typevars([then_generic_in]))],\n\t\tn_cmd_type.with_typevars([then_generic_out]),\n\t\tcmd_then,\n\t)\n\tmap_from_generic_key = NGenericType(\"k\")\n\tmap_from_generic_value = NGenericType(\"v\")\n\tglobal_scope.add_native_function(\n\t\t\"mapFrom\",\n\t\t[(\"entries\", n_list_type.with_typevars([[map_from_generic_key, map_from_generic_value]]))],\n\t\tn_map_type.with_typevars([map_from_generic_key, map_from_generic_value]),\n\t\tmap_from,\n\t)\n\tmap_get_generic_key = NGenericType(\"k\")\n\tmap_get_generic_value = NGenericType(\"v\")\n\tglobal_scope.add_native_function(\n\t\t\"getValue\",\n\t\t[(\"key\", map_get_generic_key), (\"map\", n_map_type.with_typevars([map_get_generic_key, map_get_generic_value]))],\n\t\tn_maybe_type.with_typevars([map_get_generic_value]),\n\t\tmap_get,\n\t)\n\tentries_generic_key = NGenericType(\"k\")\n\tentries_generic_value = NGenericType(\"v\")\n\tglobal_scope.add_native_function(\n\t\t\"entries\",\n\t\t[(\"map\", n_map_type.with_typevars([entries_generic_key, entries_generic_value]))],\n\t\tn_list_type.with_typevars([[entries_generic_key, entries_generic_value]]),\n\t\tentries,\n\t)\n\n\tglobal_scope.types['str'] = 'str'\n\tglobal_scope.types['char'] = 'char'\n\tglobal_scope.types['int'] = 'int'\n\tglobal_scope.types['float'] = 
'float'\n\tglobal_scope.types['bool'] = 'bool'\n\tglobal_scope.types['list'] = n_list_type\n\tglobal_scope.types['map'] = n_map_type\n\tglobal_scope.types['cmd'] = n_cmd_type\n\tglobal_scope.types['maybe'] = n_maybe_type\n\tglobal_scope.types['result'] = n_result_type\n","sub_path":"python/native_functions.py","file_name":"native_functions.py","file_ext":"py","file_size_in_byte":6890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"304784949","text":"from django.db import models\nfrom uuslug import uuslug\n\nimport catalog.api.controllers.search_controller as search_controller\nfrom catalog.api.models.media import (\n AbstractMedia,\n AbstractMediaReport,\n AbstractDeletedMedia,\n AbstractMatureMedia,\n AbstractMediaList,\n)\n\n\nclass Image(AbstractMedia):\n width = models.IntegerField(blank=True, null=True)\n height = models.IntegerField(blank=True, null=True)\n\n class Meta(AbstractMedia.Meta):\n db_table = 'image'\n\n\nclass ImageReport(AbstractMediaReport):\n class Meta:\n db_table = 'nsfw_reports'\n\n @property\n def image_url(self):\n return super(ImageReport, self).url('photos')\n\n def save(self, *args, **kwargs):\n kwargs.update({\n 'index_name': 'image',\n 'media_class': Image,\n 'mature_class': MatureImage,\n 'deleted_class': DeletedImage,\n })\n super(ImageReport, self).save(*args, **kwargs)\n\n\nclass DeletedImage(AbstractDeletedMedia):\n pass\n\n\nclass MatureImage(AbstractMatureMedia):\n \"\"\" Stores all images that have been flagged as 'mature'. \"\"\"\n\n def delete(self, *args, **kwargs):\n es = search_controller.es\n img = Image.objects.get(identifier=self.identifier)\n es_id = img.id\n es.update(\n index='image',\n id=es_id,\n body={'doc': {'mature': False}}\n )\n super(MatureImage, self).delete(*args, **kwargs)\n\n\nclass ImageList(AbstractMediaList):\n images = models.ManyToManyField(\n Image,\n related_name=\"lists\",\n help_text=\"A list of identifier keys corresponding to images.\"\n )\n\n class Meta:\n db_table = 'imagelist'\n\n def save(self, *args, **kwargs):\n self.slug = uuslug(self.title, instance=self)\n super(ImageList, self).save(*args, **kwargs)\n","sub_path":"openverse-api/catalog/api/models/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"7371235","text":"import tornado\nfrom core.logic import User\n\n\nclass BaseView(tornado.web.RequestHandler):\n def get_current_user(self):\n #mock signed user\n user_id = 2\n if not user_id:\n return None\n\n user_obj_list = User.filter(id=user_id)\n if len(user_obj_list) > 0:\n return user_obj_list[0]\n else:\n return None\n\n def message(self, message='', redirect_to='/'):\n data = {'message': message, 'redirect_to': redirect_to}\n self.render(\"message.html\", data=data)\n","sub_path":"www/views/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"233419554","text":"import sqlite3, sys\n\ndef get_mails(file):\n conn = sqlite3.connect('emaildb.sqlite')\n cur = conn.cursor()\n\n cur.execute(\"DROP TABLE IF EXISTS Counts\")\n cur.execute(\"\"\"CREATE TABLE Counts(\n email VARCHAR(20),\n count INTEGER\n )\"\"\")\n\n with open(file) as f:\n for line in f:\n if not line.startswith(\"From:\"): continue\n data = line.split()[1]\n\n cur.execute(\"SELECT email FROM Counts WHERE email=?\", (data, ))\n if cur.fetchone() 
is None:\n cur.execute(\"INSERT INTO Counts (email, count) VALUES (?, 1)\", (data, ))\n else:\n cur.execute(\"UPDATE Counts SET count = count + 1 WHERE email = ?\", (data, ))\n conn.commit()\n\n\n for n in cur.execute(\"SELECT * FROM Counts ORDER BY count DESC LIMIT 5\"):\n print(\"Email: {} appears: {} times\".format(n[0], n[1]))\n cur.close()\n\nif __name__ == \"__main__\":\n get_mails(sys.argv[1])\n","sub_path":"Databases/My First Database/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"590912969","text":"from spamc import SpamC, exceptions\nimport logging\n\n\nlogger = logging.getLogger('surgat.SAConnector')\n\n\ndef spamd_headers_for_message(data):\n rv = []\n is_spam = data.get('isspam', False)\n for k in data.get('headers', []):\n if not is_spam and k not in ['X-Spam-Status', 'X-Spam-Checker-Version']:\n continue\n if not k.startswith('X-Spam') or k == 'Subject':\n continue\n rv.append('{}: {}'.format(k, data['headers'][k]))\n return rv\n\n\nclass SAConnector(object):\n def __init__(self, server='localhost', port=783, user=None):\n \"\"\" A connection to spamd for the provided user. \"\"\"\n logger.info(\"Establishing a connection to spamd...\")\n self.client = SpamC(server, port, user=user)\n\n def check_ping(self):\n try:\n self.client.ping()\n except exceptions.SpamCError:\n return False\n return True\n\n def check(self, msg):\n \"\"\" Actually do the check of the message.\n :param msg: Message body to check...\n :return: {'result': True/False, 'headers': spam-headers, 'basescore': n.n, 'score': n.n}\n \"\"\"\n ck = self.client.headers(msg)\n if ck['code'] != 0:\n return {'code': ck.get('code')}\n return {'result': True if ck.get('isspam', False) else False,\n 'basescore': ck.get('basescore'),\n 'code': ck.get('code'),\n 'score': ck.get('score'),\n 'headers': spamd_headers_for_message(ck)}\n\n def rule_list(self, msg):\n ck = self.client.symbols(msg)\n if ck.get('code') == 0:\n return ck.get('symbols')\n return []\n","sub_path":"surgat/connector.py","file_name":"connector.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"145921749","text":"# Dimensionality reduction: singular value decomposition (SVD), principal component analysis (PCA), factor analysis (FA), independent component analysis (ICA)\n# Reduce the dimensionality of the features and drop the unimportant ones\nimport os\nimport cv2\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Isolate objects of a given color\ndef color_seperate(img):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) # convert the color space to HSV for easier separation\n lower_hsv = np.array([100, 43, 46]) # lower bound of the color to extract\n high_hsv = np.array([124, 255, 255]) # upper bound of the color to extract\n # inRange() performs thresholding and can operate on multiple channels at once\n # inRange(InputArray src, InputArray lowerb, InputArray upperb, OutputArray dst)\n # src: input image, lowerb: lower HSV bound of the color to extract, upperb: upper HSV bound of the color to extract, dst: output image\n image = cv2.inRange(hsv, lowerb=lower_hsv, upperb=high_hsv)\n # image = cv2.cvtColor(mask, cv2.COLOR_HSV2BGR)\n cv2.imshow(\"img\",image)\n cv2.waitKey(0)\n\n return image\n\n# Main function\ndef main():\n img = cv2.imread(\"test/test1.jpg\")\n print(img.shape)\n # r, g, b = cv2.split(img) # split the color image into its three channels\n # img = cv2.merge([r, g, b]) # merge the three color channels back together\n # img[:,:,0] = img[:,:,0]*0\n # img[:,:,1] = img[:,:,1]*0\n # img[:,:,2] = img[:,:,2]*1\n # (1) Resize the image\n crop_size = (500, 300)\n img = cv2.resize(img, crop_size, interpolation=cv2.INTER_CUBIC)\n # (2) Convert to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n #*******************************************************************************************************************#\n # (3) PCA via eigendecomposition of the covariance matrix (X: m*n, with n samples and m features, reduced to k dimensions)\n # (3.1) Subtract the mean (centering): for each feature, subtract that feature's mean over all samples\n # (3.2) Compute the covariance matrix (X*X')/n\n # (3.3) Compute the eigenvalues and eigenvectors of (X*X')/n (m*1 column vectors, normalized) (very expensive when both the number of samples and the number of features are large)\n # (3.4) Sort the eigenvalues in descending order, keep the largest k, and stack the corresponding k eigenvectors as \"row vectors\" (transposed column vectors) into the eigenvector matrix P\n # (3.5) Project the data into the new space spanned by the k eigenvectors: Y=PX (Y: k*n; P: k*m; X: m*n)\n #*******************************************************************************************************************#\n # (4) PCA via SVD (PCA only uses the left singular matrix of the SVD, not the right singular matrix)\n # (4.1) Subtract the mean (centering): for each feature, subtract that feature's mean over all samples\n # (4.2) Compute the covariance matrix (X*X')/n\n # (4.3) Use SVD to compute the eigenvalues and eigenvectors of (X*X')/n (the only difference) (no explicit eigendecomposition needed; very effective when the sample size is large)\n # (4.4) Sort the eigenvalues in descending order, keep the largest k, and stack the corresponding k eigenvectors as \"row vectors\" (transposed column vectors) into the eigenvector matrix P\n # (4.5) Project the data into the new space spanned by the k eigenvectors: Y=PX (Y: k*n; P: k*m; X: m*n)\n #*******************************************************************************************************************#\n # (5) The SVD method: full-rank factorization of an arbitrary matrix; the factorization can be used to compress data, A(m*n) = U(m,k)*sigma(k,k)*V'(k*n)\n # Properties of a symmetric matrix: it can always be diagonalized, and eigenvectors belonging to distinct eigenvalues are mutually orthogonal\n # Since the eigenvectors are mutually orthogonal, U is an orthogonal matrix, and the inverse of an orthogonal matrix equals its transpose\n # vi are the eigenvectors of A'A (n*m x m*n = n*n) (the largest k form the k*n matrix V'), called the right singular vectors of A; ui=Avi are in fact eigenvectors of AA', called the left singular vectors of A\n # A = X*Y is a full-rank factorization of A (X = U(n,1:k)*sigma(1:k,1:k); Y = V(1:k,n))\n # The left singular matrix compresses the rows (m*n -> k*n); the right singular matrix compresses the columns, i.e. the feature dimensions (m*n -> m*k)\n U,S,V = np.linalg.svd(gray)\n # x = np.linspace(0, len(S), len(S))\n # y = list(S)\n # plt.plot(x, y, ls=\"-\", lw=2, label=\"plot figure\")\n # plt.legend()\n # plt.show()\n # Reconstruction (small singular values tend to cluster in the two extremes (white and black) of enclosed regions)\n k = np.sum(S > 0.005*max(S))\n # Dynamically adjust the singular values to emphasize certain local features\n for k in range(len(S)):\n B = np.dot(U[:,:k], np.diag(S[:k])).dot(V[:k,:])\n C = np.rint(B).astype('uint8')\n # C = cv2.blur(C, (3, 5))\n # C = cv2.GaussianBlur(C, (5, 3), 0)\n C = cv2.medianBlur(C, 5)\n # 9: filter neighborhood diameter, 75: sigma of the spatial Gaussian, 75: sigma of the gray-value similarity\n # C = cv2.bilateralFilter(C, 5, 75, 75)\n print(k,round(S[k],2), np.sum(C == 255))\n # plt.ion()\n # plt.imshow(C)\n # plt.pause(0.05)\n # plt.close()\n cv2.imshow(\"img\", C)\n cv2.waitKey(50)\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"data_dimension_reduction/data_dimension_reduction.py","file_name":"data_dimension_reduction.py","file_ext":"py","file_size_in_byte":5052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"418739921","text":"'''\nScript to extract 3D bounding boxes from the 2D slices.\n\nWritten by Luca Derumier.\nVersion 1.0 - May 2020.\n'''\nimport argparse\nimport os\nimport numpy as np\nimport re\n\nfrom config import Config\nfrom utils import *\n\n#########################################################\n################### Parsing arguments ###################\n#########################################################\n\nargparser = argparse.ArgumentParser(\n description=\"Evaluates the detection system on custom data.\")\n\nargparser.add_argument(\n '-d',\n '--data_path',\n help=\"path to the folder containing the data folders.\",\n default=os.path.join('pelvis_scan','FULL_IMAGES_CT'))\n\nargparser.add_argument(\n '-i',\n '--image',\n help=\"name of the folder that contains all the 2D slices and ground truth annotations.\",\n default='charleroi_1')\n\nargparser.add_argument(\n '-p',\n '--pred',\n help=\"path to the prediction file.\",\n default=os.path.join('predictions','pred_boxes.p'))\n\nargparser.add_argument(\n '-s',\n '--scale',\n help=\"scaling factor for the bounding box.\",\n default='0')\n\nargparser.add_argument(\n '-l',\n '--limit',\n help=\"scaling 
factor limit for the bounding box.\",\n default='20')\n\nargparser.add_argument(\n '-G',\n '--graphs',\n help=\"enables graph mode.\",\n action='store_true')\n\nargparser.add_argument(\n '-E',\n '--each',\n help=\"enables independent organ extraction mode.\",\n action='store_true')\n\n\n\n########################################################\n######################### Main #########################\n########################################################\n\ndef _main(args):\n # Raw arguments from parser\n data_path = args.data_path\n image_folder = args.image\n pred_path = args.pred\n scaling = int(args.scale)\n limit = int(args.limit)\n g_mode = args.graphs\n each = args.each\n\n # Config instance and scaling ratio\n config = Config()\n ratio = (config.INPUT_DIM[0]/config.OUTPUT_DIM[0])\n\n # Checking good composition of the image folders\n contentChecker(data_path,pred_path)\n\n # Computes average box\n #save_annotation(averageBox('/Volumes/LUCA_EHD/TFE_DATA/USABLE_DATASETS/FULL_IMAGES_CT',[5,18,27,35,45,59,61,62,65,71,75,89,91]),os.path.join(data_path,'average_box_training.p'))\n #average_box = [int(x) for x in load_annotation(os.path.join(data_path,'average_box_training.p'))]\n #average_comparison(average_box,data_path,pred_path,ratio=ratio,scaling = 0,write=True)\n\n # Study scaling impact\n if g_mode:\n metrics_graphs(data_path,os.path.join(data_path,'extract_scaling.p'))\n elif each:\n metrics = extract_each(data_path,pred_path,ratio=ratio,write=True)\n else:\n metrics = extract_scaling(data_path,pred_path,ratio=ratio,limit=limit,write=True)\n save_annotation(metrics,os.path.join(data_path,'extract_scaling.p'))\n\n\n############################################################\n################### Extraction functions ###################\n############################################################\ndef average_comparison(average_box,path,pred_path,ratio=500/416,scaling = 0,write=False):\n '''Computes the average IoU between the average training gt box and test gt box against predictions.\n\n Inputs:\n average_box: the average box position from training set.\n path: the path to the images folders.\n pred_path: the path to the predictions file that has to be stored in the image folder.\n\n Returns:\n IoUs: the list containing IoU of ground truth and predictions.\n '''\n GTs = []\n preds = []\n IoUs = []\n dir_list = [x for x in os.listdir(path) if x.startswith('charleroi')]\n for dir in dir_list:\n\n # Load prediction and ground truth annotations\n pred_boxes = load_annotation(os.path.join(path,dir,pred_path))\n true_boxes = load_annotation(os.path.join(path,dir,'boxes.p'))\n gt_box = true_boxes['all']\n\n # Computes total 3D box for predicted boxes\n pred_box_dilated = extract(merge_boxes(pred_boxes))\n pred_box = [x*ratio for x in pred_box_dilated]\n\n if scaling > 0:\n pred_box = [pred_box[0]-scaling, pred_box[1]-scaling, pred_box[2]+scaling, pred_box[3]+scaling]\n\n # Computes and stores IoU and area of the box\n GTs.append(compute_IoU(average_box,gt_box))\n preds.append(compute_IoU(average_box,pred_box))\n IoUs.append(compute_IoU(pred_box,gt_box))\n\n if write:\n f = open(os.path.join(path,'compare.txt'),'w+')\n for i in range(len(GTs)):\n f.write('patient {} : {:.3f} | {:.3f} | {:.3f}\\n\\n'.format(i,GTs[i],preds[i],IoUs[i]))\n f.write('average : {} | {} | {}\\n\\n'.format(np.mean(GTs),np.mean(preds),np.mean(IoUs)))\n f.write('std : {} | {} | {}\\n\\n'.format(np.std(GTs),np.std(preds),np.std(IoUs)))\n f.close()\n\n return [GTs,preds,IoUs]\n\ndef 
extract_scaling(path,pred_path,ratio=500/416,input_dim=[500,500],limit=20,write=False):\n '''Extracts 3D boxes and computes all metrics for scaling factors in the range [0;limit].\n\n Inputs:\n path: the path to the images folders.\n pred_path: the path to the predictions file that has to be stored in the image folder.\n\n Returns:\n metrics: the dictionary containing the mean and variance of the metrics for 3D boxes for each scaling factor.\n '''\n\n metrics = {}\n for s in range(0,limit+1):\n m = extract_all(path,pred_path,ratio=ratio,input_dim=input_dim,scaling=s)\n metrics.update({s : m})\n\n if write:\n f = open(os.path.join(path,'extract_stats_'+str(s)+'.txt'),'w+')\n for key,item in m.items():\n f.write('{} : {}\\n\\n'.format(key,item))\n f.close()\n\n return metrics\n\ndef extract_all(path,pred_path,ratio=500/416,input_dim=[500,500],scaling=0):\n '''Extracts 3D boxes and computes the IoU against the ground truth for all the images contained in path, along with other metrics.\n\n Inputs:\n path: the path to the images folders.\n pred_path: the path to the predictions file that has to be stored in the image folder.\n\n Returns:\n metrics: the dictionary containing the mean and variance of the metrics for 3D boxes.\n\n '''\n\n dir_list = [x for x in os.listdir(path) if x.startswith('charleroi')]\n\n IoUs = []\n areas = []\n normal_area = []\n TPs = []\n TNs = []\n FPs = []\n FNs = []\n precs = []\n recs = []\n FNRs = []\n\n\n # Computation of all IoUs and areas phase\n for dir in dir_list:\n # Load prediction and ground truth annotations\n pred_boxes = load_annotation(os.path.join(path,dir,pred_path))\n true_boxes = load_annotation(os.path.join(path,dir,'boxes.p'))\n gt_box = true_boxes['all']\n\n # Computes total 3D box for predicted boxes\n pred_box_dilated = extract(merge_boxes(pred_boxes))\n pred_box = [x*ratio for x in pred_box_dilated]\n if scaling > 0:\n pred_box = [pred_box[0]-scaling, pred_box[1]-scaling, pred_box[2]+scaling, pred_box[3]+scaling]\n\n # Computes and stores IoU and area of the box\n IoU = compute_IoU(pred_box,gt_box)\n IoUs.append(IoU)\n\n (TP,TN,FP,FN,precision,recall,FNR) = compute_classification(gt_box,pred_box,input_dim)\n TPs.append(TP)\n TNs.append(TN)\n FPs.append(FP)\n FNs.append(FN)\n precs.append(precision)\n recs.append(recall)\n FNRs.append(FNR)\n\n area = (pred_box[2]-pred_box[0])*(pred_box[3]-pred_box[1]) #(xB-xA)*(yB-yA)\n areas.append(area)\n normal_area.append(area/(input_dim[0]*input_dim[1]))\n\n\n\n # Extracting mean and variance\n final_iou = {'mean': np.mean(IoUs), 'variance': np.std(IoUs)}\n final_area = {'mean': np.mean(areas), 'variance': np.std(areas)}\n final_normal_area = {'mean': np.mean(normal_area), 'variance': np.std(normal_area)}\n final_TP = {'mean': np.mean(TPs), 'variance': np.std(TPs)}\n final_TN = {'mean': np.mean(TNs), 'variance': np.std(TNs)}\n final_FP = {'mean': np.mean(FPs), 'variance': np.std(FPs)}\n final_FN = {'mean': np.mean(FNs), 'variance': np.std(FNs)}\n final_precision = {'mean': np.mean(precs), 'variance': np.std(precs)}\n final_recall = {'mean': np.mean(recs), 'variance': np.std(recs)}\n final_FNR = {'mean': np.mean(FNRs), 'variance': np.std(FNRs)}\n\n metrics = {'IoU' : final_iou, 'area' : final_area, 'norm_area' : final_normal_area, 'TP' : final_TP, 'TN' : final_TN, 'FP': final_FP, 'FN': final_FN, 'precision' : final_precision, 'recall' : final_recall, 'FNR' : final_FNR}\n\n return metrics\n\ndef extract_each(path,pred_path,ratio=500/416,input_dim=[500,500],scaling=0,write=False):\n '''Extracts 3D boxes and computes, for each organ, the IoU against the ground truth for all the images contained in path, along with other metrics.\n\n Inputs:\n path: the path to the images folders.\n pred_path: the path to the predictions file that has to be stored in the image folder.\n\n Returns:\n metrics: the dictionary containing the mean and variance of the metrics for 3D boxes.\n\n '''\n\n dir_list = [x for x in os.listdir(path) if x.startswith('charleroi')]\n\n metrics = {'bladder' : {'IoU' : [], 'precision' : [], 'recall' : [], 'FNR' : []},\n 'rectum' : {'IoU' : [], 'precision' : [], 'recall' : [], 'FNR' : []},\n 'prostate' : {'IoU' : [], 'precision' : [], 'recall' : [], 'FNR' : []},\n 'average' : {'IoU' : [], 'precision' : [], 'recall' : [], 'FNR' : []}}\n\n\n # Computation of all IoUs and areas phase\n for dir in dir_list:\n\n # Load prediction and ground truth annotations\n pred_boxes = merge_each(load_annotation(os.path.join(path,dir,pred_path)))\n true_boxes = merge_each(format(load_annotation(os.path.join(path,dir,'boxes.p'))))\n\n for organ,box in pred_boxes.items():\n pred_box = [x*ratio for x in box][:4]\n\n if scaling > 0:\n pred_box = [pred_box[0]-scaling, pred_box[1]-scaling, pred_box[2]+scaling, pred_box[3]+scaling]\n\n IoU = compute_IoU(pred_box,true_boxes[organ][:4])\n metrics[organ]['IoU'].append(IoU)\n metrics['average']['IoU'].append(IoU)\n (TP,TN,FP,FN,precision,recall,FNR) = compute_classification(true_boxes[organ][:4],pred_box,input_dim)\n metrics[organ]['precision'].append(precision)\n metrics['average']['precision'].append(precision)\n metrics[organ]['recall'].append(recall)\n metrics['average']['recall'].append(recall)\n metrics[organ]['FNR'].append(FNR)\n metrics['average']['FNR'].append(FNR)\n\n if write:\n f = open(os.path.join(path,'organ_stats.txt'),'w+')\n f.write('organ - iou - precision - recall - fnr\\n\\n')\n for key,item in metrics.items():\n f.write('{} : {:.3f}+{:.3f} | {:.3f}+{:.3f} | {:.3f}+{:.3f} | {:.3f}+{:.3f}\\n\\n'.format(key,np.mean(item['IoU']),np.std(item['IoU']),np.mean(item['precision']),np.std(item['precision']),np.mean(item['recall']),np.std(item['recall']),np.mean(item['FNR']),np.std(item['FNR'])))\n f.close()\n\n return metrics\n\ndef merge_boxes(pred_boxes):\n '''Merges the organ boxes into a dictionary.\n\n Inputs:\n pred_boxes: the dictionary containing the bounding boxes and scores.\n pred_boxes = {filename : {'bladder': [[xA,yA,xB,yB,score],[...]],\n 'rectum': [..],\n 'prostate': [..]},\n filename : {...},...}\n\n Returns:\n total_boxes: the dictionary containing the bounding boxes and scores that contain all the organs of each slide.\n total_boxes = {filename1 : [xA,yA,xB,yB,score],\n filename2 : [xA,yA,xB,yB,score],...}\n\n '''\n\n total_boxes = {}\n for filename,organs in pred_boxes.items():\n # New coordinates\n xA = 10000\n yA = 10000\n xB = -1\n yB = -1\n conf = 1\n\n # Extract the boxes\n for organ,boxes in organs.items():\n if len(boxes) > 0:\n\n # Sort the boxes if there is more than 1 prediction\n if len(boxes) > 1:\n # Sort from least confident to most confident\n boxes.sort(key=lambda x:x[4])\n\n # Keep only the box that has the best confidence score if this score is above 0.5\n box = boxes[-1]\n if(box[-1] > 0.5):\n xA = min(xA,box[0])\n yA = min(yA,box[1])\n xB = max(xB,box[2])\n yB = max(yB,box[3])\n conf = min(conf,box[4])\n\n # Adds the bounding box to the new dictionary if it has been updated\n if(xA != 10000 and yA != 10000 and xB > 0 and yB > 0):\n total_boxes.update({filename : [xA,yA,xB,yB,conf]})\n\n return total_boxes\n\ndef merge_each(pred_boxes):\n '''Merges the organ boxes into a dictionary by organ.\n\n Inputs:\n pred_boxes: the dictionary containing the bounding boxes and scores.\n pred_boxes = {filename : {'bladder': [[xA,yA,xB,yB,score],[...]],\n 'rectum': [..],\n 'prostate': [..]},\n filename : {...},...}\n\n Returns:\n organ_boxes: the dictionary containing, for each organ, the merged bounding box and score.\n organ_boxes = {'bladder' : [xA,yA,xB,yB,score], 'rectum' :[xA,yA,xB,yB,score], 'prostate' : [xA,yA,xB,yB,score]}\n\n '''\n # New coordinates\n xA = 10000\n yA = 10000\n xB = -1\n yB = -1\n conf = 1\n\n organ_boxes = {'bladder': [xA,yA,xB,yB,conf],'rectum': [xA,yA,xB,yB,conf],'prostate': [xA,yA,xB,yB,conf]}\n for filename,organs in pred_boxes.items():\n # Extract the boxes\n for organ,boxes in organs.items():\n if len(boxes) > 0:\n # Sort the boxes if there is more than 1 prediction\n if len(boxes) > 1:\n # Sort from least confident to most confident\n boxes.sort(key=lambda x:x[4])\n\n # Keep only the box that has the best confidence score if this score is above 0.5\n box = boxes[-1]\n if(box[-1] > 0.5):\n organ_boxes[organ][0] = min(organ_boxes[organ][0],box[0]) # xA\n organ_boxes[organ][1] = min(organ_boxes[organ][1],box[1]) # yA\n organ_boxes[organ][2] = max(organ_boxes[organ][2],box[2]) # xB\n organ_boxes[organ][3] = max(organ_boxes[organ][3],box[3]) # yB\n organ_boxes[organ][4] = min(organ_boxes[organ][4],box[4]) # conf\n\n\n return organ_boxes\n\ndef format(true_boxes):\n '''Transforms the true boxes dictionary into the pred boxes format.\n\n Inputs:\n true_boxes: the dictionary containing the ground truth bounding boxes.\n total_boxes = {filename1 : {'bb' : {'bladder' : [xA,yA,xB,yB], 'prostate' : [], ..}, 'shape': (....)},\n filename2 : {'bb' : {'bladder' : [xA,yA,xB,yB], 'prostate' : [], ..}, 'shape': (....)}}\n\n Returns:\n boxes: the reformatted dictionary\n boxes = {filename1 : {'bladder': [[xA,yA,xB,yB,score]],\n 'rectum': [..],\n 'prostate': [..]},\n filename : {...},...}\n '''\n boxes = {}\n for file in true_boxes.keys():\n boxes.update({file : {'bladder' : [], 'rectum': [], 'prostate':[]}})\n if type(true_boxes[file]) is dict :\n for organ,box in true_boxes[file]['bb'].items():\n\n if organ in boxes[file].keys():\n boxes[file][organ].append(box + [0.99])\n return boxes\n\n\ndef extract(total_boxes):\n '''Extracts one bounding box that holds all the others inside it.\n\n Inputs:\n total_boxes: the dictionary containing the bounding boxes and scores that contain all the organs of each slide.\n total_boxes = {filename1 : [xA,yA,xB,yB,score],\n filename2 : [xA,yA,xB,yB,score],...}\n\n Returns:\n [xA,yA,xB,yB]: the final box coordinates for the 3D image\n\n '''\n\n xA = 10000\n yA = 10000\n xB = -1\n yB = -1\n\n for filename,box in total_boxes.items():\n # New coordinates\n xA = min(xA,box[0])\n yA = min(yA,box[1])\n xB = max(xB,box[2])\n yB = max(yB,box[3])\n\n if(xA >= 10000 or yA >= 10000 or xB < 0 or yB < 0):\n raise ValueError('Some coordinates were never (or wrongly) updated during the final box extraction.')\n\n return [xA,yA,xB,yB]\n\ndef averageBox(path,ignore):\n '''Extracts the average box of the training set.\n\n Inputs:\n path: path to the folder that contains the images.\n ignore: numbers to ignore (data from validation and test set).\n\n Returns:\n [xA,yA,xB,yB]: the final box coordinates for the 3D image.\n\n '''\n\n dir_list = [x for x in os.listdir(path) if x.startswith('charleroi')]\n xAs = []\n yAs = []\n xBs = []\n yBs = []\n for file in 
dir_list:\n if not (int(re.findall('\\d+', file)[0]) in ignore):\n box = load_annotation(os.path.join(path,file,'boxes.p'))\n xAs.append(box['all'][0])\n yAs.append(box['all'][1])\n xBs.append(box['all'][2])\n yBs.append(box['all'][3])\n\n return [np.mean(xAs),np.mean(yAs),np.mean(xBs),np.mean(yBs)]\n\n#########################################################\n################### Utility functions ###################\n#########################################################\ndef contentChecker(path,pred_path):\n '''Checks that all image folders in path have predictions and ground truth data.\n\n Inputs:\n path: the path to the images folders.\n pred_path: the path to the predictions file that has to be stored in the image folder.\n\n '''\n\n dir_list = [x for x in os.listdir(path) if x.startswith('charleroi')]\n\n # Computation of all IoUs and areas phase\n for dir in dir_list:\n if(not os.path.exists(os.path.join(path,dir,pred_path))):\n print('Missing predictions for {}'.format(dir))\n if(not os.path.exists(os.path.join(path,dir,'boxes.p'))):\n print('Missing ground truth for {}'.format(dir))\n\ndef metrics_graphs(path,extract_path):\n '''Plots graphs of the metrics in terms of scaling factor.\n\n Inputs:\n path: the path to the images folders.\n pred_path: the path to the file that stores the extraction results.\n\n '''\n\n metrics = load_annotation(extract_path)\n iou = np.zeros((2,len(metrics)))\n precision = np.zeros((2,len(metrics)))\n recall = np.zeros((2,len(metrics)))\n fnr = np.zeros((2,len(metrics)))\n area = np.zeros((2,len(metrics)))\n\n for s,m in metrics.items():\n iou[0][s] = m['IoU']['mean']\n iou[1][s] = m['IoU']['variance']\n precision[0][s] = m['precision']['mean']\n precision[1][s] = m['precision']['variance']\n recall[0][s] = m['recall']['mean']\n recall[1][s] = m['recall']['variance']\n fnr[0][s] = m['FNR']['mean']\n fnr[1][s] = m['FNR']['variance']\n area[0][s] = m['norm_area']['mean']\n area[1][s] = m['norm_area']['variance']\n\n if(m['FNR']['mean'] == 0.0):\n max = s\n break\n\n data_metrics = [iou[0][0:s+1],precision[0][0:s+1],recall[0][0:s+1],fnr[0][0:s+1]]\n var_metrics = [iou[1][0:s+1],precision[1][0:s+1],recall[1][0:s+1],fnr[1][0:s+1]]\n extract_graph(data_metrics,var_metrics,list(range(s+1)),['IoU','precision','recall','fnr'],['mediumpurple','darkseagreen','darkorange','tomato'],ylabel='',title='',save=True,path=os.path.join(path,'extract_metrics.pdf'))\n extract_graph([area[0][0:s+1]],[area[1][0:s+1]],list(range(s+1)),['volume ratio'],['royalblue'],ylabel='volume ratio',title='',save=True,path=os.path.join(path,'extract_GPU.pdf'))\n #av_64 = [x*578 for x in area[0][0:s+1]]\n im = [(x*578)/8 for x in area[0][0:s+1]]\n mask = [(x*1729)/8 for x in area[0][0:s+1]]\n #max = [x*1058 for x in area[0][0:s+1]]\n #min = [x*431 for x in area[0][0:s+1]]\n extract_graph([im,mask],[np.zeros((s+1)),np.zeros((s+1))],list(range(s+1)),['image','mask'],['royalblue','red'],ylabel='data size in MB',title='',save=True,path=os.path.join(path,'extract_size_images.pdf'),mem_lines=True)\n #av_64 = [x*1729 for x in area[0][0:s+1]]\n #av_8 = [(x*1729)/8 for x in area[0][0:s+1]]\n #max = [x*3175 for x in area[0][0:s+1]]\n #min = [x*1293 for x in area[0][0:s+1]]\n #extract_graph([av_64,av_8],[np.zeros((s+1)),np.zeros((s+1))],list(range(s+1)),['uint64','uint8'],['steelblue','darkseagreen'],ylabel='data size in MB',title='',save=True,path=os.path.join(path,'extract_size_mask.pdf'))\n\n\n\n############################################\n################### Main 
###################\n############################################\n\nif __name__ == '__main__':\n args = argparser.parse_args()\n _main(args)\n","sub_path":"pelvis_yolo/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":21297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"95992033","text":"import torch.optim as optim\nfrom src.model import *\nfrom src.data import *\nfrom src.graph import draw_accuracy_graph\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n# Load Training data\nbatch_size = 128\ntraining_set_loader = CIFAR10DataLoader().get_data_loader(data_set='training', batch_size=batch_size)\n\n# Initialize Convolution Neural Network\ncnn = WideResNet()\nprint(cnn)\nprint(cnn.parameters())\n\n# Define Loss function\nloss_func = nn.CrossEntropyLoss()\n# Initialize Optimizer\noptimizer = optim.SGD(cnn.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)\n\nif torch.cuda.is_available():\n cnn = cnn.cuda()\n\n# Train network model\n# Note: only TRAINING set will be used to train the network\nplt_x = []\nplt_train_y = []\nfor epoch in range(200):\n print('Epoch = ', epoch + 1)\n running_loss = 0.0\n mini_batch = 0\n # Increase learning rate and weight_decay correspondingly\n if epoch == 50:\n optimizer = optim.SGD(cnn.parameters(), lr=0.02, momentum=0.9, weight_decay=5e-4)\n if epoch == 80:\n optimizer = optim.SGD(cnn.parameters(), lr=0.004, momentum=0.9, weight_decay=5e-4)\n if epoch == 120:\n optimizer = optim.SGD(cnn.parameters(), lr=0.0008, momentum=0.9, weight_decay=1e-3)\n for data in training_set_loader:\n mini_batch += 1\n images, labels = data\n images = images.to(device)\n labels = labels.to(device)\n prediction = cnn(images)\n loss = loss_func(prediction, labels)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n running_loss += loss.item()\n # Output Cross-entropy Loss twice per epoch\n check_per_batch = int(50000 / batch_size / 2)\n if mini_batch % check_per_batch == check_per_batch - 1:\n print('[%d, %5d] loss: %.5f' % (epoch + 1, mini_batch + 1, running_loss / check_per_batch))\n running_loss = 0.0\n # Training Accuracy Evaluation\n plt_x.append(epoch + 1)\n correct = 0\n total = 0\n with torch.no_grad():\n for data in training_set_loader:\n images, labels = data\n images = images.to(device)\n labels = labels.to(device)\n outputs = cnn(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n training_set_accuracy = 100 * correct / total\n plt_train_y.append(training_set_accuracy)\n print('Training Accuracy of the model: %.4f %%' % training_set_accuracy)\n if epoch % 5 == 4:\n torch.save(cnn, './model/Wide_ResNet_batch-size={}_epo_{}'.format(batch_size, epoch + 1))\nprint('Finished Training\\n\\n')\n\n# Show Training Accuracy result\nprint('Training Accuracy: ')\nprint(plt_x)\nprint(plt_train_y)\ndraw_accuracy_graph(plt_x, plt_train_y)\n","sub_path":"src/train_wideresnet.py","file_name":"train_wideresnet.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"326358220","text":"from __future__ import unicode_literals\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.urls import reverse\nfrom .models import Users,Trips,Plans\nfrom datetime import datetime\nimport bcrypt\n\ndef index(request):\n return render(request,'travelBuddy/index.html')\n\ndef 
register(request):\n errors = Users.objects.validator(request.POST)\n if len(errors):\n for tag,error in errors.iteritems():\n messages.error(request,error,extra_tags=tag)\n return redirect('index')\n else:\n hashedPW = bcrypt.hashpw(request.POST['password'].encode(),bcrypt.gensalt())\n Users.objects.create(name=request.POST['name'],userName=request.POST['userName'],password=hashedPW)\n user = Users.objects.get(userName=request.POST['userName'])\n request.session['name'] = user.name\n request.session['userName'] = user.userName\n request.session['userID'] = user.id\n return redirect('travel')\n\ndef login(request):\n errors = Users.objects.loginValid(request.POST)\n if len(errors):\n for tag,error in errors.iteritems():\n messages.error(request,error,extra_tags=tag)\n return redirect('index')\n else:\n user = Users.objects.get(userName=request.POST['userName'])\n request.session['name'] = user.name\n request.session['userName'] = user.userName\n request.session['userID'] = user.id\n return redirect('travel')\n\ndef travel(request):\n userTrips = Trips.objects.filter(joined=request.session['userID'])|Trips.objects.filter(organized=request.session['userID'])\n otherTrips = Trips.objects.exclude(organized=Users.objects.get(id=request.session['userID'])).exclude(joined=Users.objects.get(id=request.session['userID']))\n context = {\n 'userTrips':userTrips,\n 'otherTrips':otherTrips\n }\n return render(request,'travelBuddy/travel.html',context)\n\ndef addTravel(request):\n return render(request,'travelBuddy/addTravel.html')\n\ndef submitTrip(request):\n errors = Trips.objects.tripValid(request.POST)\n if len(errors):\n for tag,error in errors.iteritems():\n messages.error(request,error,extra_tags=tag)\n return redirect('addTravel')\n else:\n Trips.objects.create(destination=request.POST['destination'],plans=request.POST['description'],startDate=datetime.strptime(request.POST['travelFrom'],'%Y-%m-%d'),endDate=datetime.strptime(request.POST['travelTo'],'%Y-%m-%d'),organized=Users.objects.get(id=request.session['userID']))\n trip = Trips.objects.get(destination=request.POST['destination'],startDate=datetime.strptime(request.POST['travelFrom'],'%Y-%m-%d'),endDate=datetime.strptime(request.POST['travelTo'],'%Y-%m-%d'),organized=Users.objects.get(id=request.session['userID']))\n return redirect('travel')\n\ndef joinTrip(request,id):\n Plans.objects.create(trip=Trips.objects.get(id=id),join=Users.objects.get(id=request.session['userID']))\n return redirect('travel')\n\ndef trip(request,id):\n trip = Trips.objects.get(id=id)\n plans = Plans.objects.filter(trip=trip)\n context = {\n 'trip':trip,\n 'plans':plans\n }\n return render(request,'travelBuddy/trip.html',context)\n\ndef logout(request):\n request.session.clear()\n return redirect('index')\n","sub_path":"apps/travelBuddy/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"462753045","text":"def isMatch(s, p):\n if not p:\n return not s\n\n if '*' not in p and len(s) != len(p):\n return False\n\n if p == \".*\":\n return True\n\n flag = False\n rp = \"\"\n while p:\n if s:\n print(\"inside S => pat : {} and str : {}\".format(p, s))\n if p[0] in [s[0], '.']:\n flag = False\n if p[1:2] == \"*\":\n s = s[1:]\n else:\n s = s[1:]\n p = p[1:]\n else:\n if p[1:2] == '*' and not flag:\n p = p[2:]\n flag = True\n else:\n print(\"S > here\")\n return False\n else:\n print(\"inside P => pat : {} and str : {}\".format(p, s))\n if not 
rp:\n rp = p\n print(\"RP : {}\".format(rp))\n if not p[1:2] == \"*\":\n if p[0] == \".\":\n return False\n elif p[0] + \"*\" in rp:\n p = p[1:]\n else:\n print(\"p > here\")\n return False\n else:\n p = p[2:]\n\n if s:\n return False\n\n # pflag = \"\"\n # while p:\n # print(\"Inside P\")\n # print(\"pat : {}\".format(p))\n # if not p[1:2] == \"*\" and not p[0] == pflag:\n # return False\n # else:\n # pflag = p[0]\n # p = p[2:]\n\n return True\n\ndef isMatchUsingDp(text, pattern):\n memo = {}\n\n def dp(i, j):\n print(\"i : {} and j : {}\".format(i, j))\n if (i, j) not in memo:\n if j == len(pattern):\n ans = i == len(text)\n print(\"Here\")\n else:\n first_match = i < len(text) and pattern[j] in {text[i], '.'}\n if j + 1 < len(pattern) and pattern[j + 1] == '*':\n ans = dp(i, j + 2) or first_match and dp(i + 1, j)\n else:\n ans = first_match and dp(i + 1, j + 1)\n\n memo[i, j] = ans\n print(memo)\n else:\n print(\"use memo : i : {} and j : {}\".format(i, j))\n return memo[i, j]\n\n print(\"len of text : {} and length of pattern : {}\".format(len(text), len(pattern)))\n return dp(0, 0)\n\n# print(isMatch(\"aaa\", \"a*a\"))\n# print(isMatch(\"aaa\", \"ab*a*c*a\"))\n# print(isMatch(\"mississippi\", \"mis*is*ip*.\"))\n# print(isMatch(\"aa\", \"a*\"))\n# print(isMatch(\"abcd\", \"d*\"))\n# print(isMatch(\"a\", \"ab*a\"))\n# print(isMatch(\"a\", \".*..a*\"))\n# print(isMatch(\"ab\", \".*..\"))\n\nprint(isMatchUsingDp(\"abcd\", \"d*\"))\n","sub_path":"regularexpressionmatching.py","file_name":"regularexpressionmatching.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"92823309","text":"class Product:\n all_products = []\n \n def __init__(self, name, price, category):\n self.name = name\n self.price = price\n self.category = category\n self.id = len(type(self).all_products)\n type(self).all_products.append(self)\n\n def update_price(self, percent_change, is_increased):\n sign = 1\n if not is_increased:\n sign *= -1\n self.price *= 1 + percent_change / 100 * sign\n \n def print_info(self):\n print(f'{self.name}: ${round(float(self.price),2)} - {self.category}')\n\n","sub_path":"product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"555752432","text":"#String Formatting?\n\nname=input()\nschool=input()\nage=int(input())\n\nprint('School : {} Name : {} Age : {}'.format(school,name,age))\n\nname='홍길동'\nschool='성균관'\nage=20\n\nprint('School : {} Name : {} Age : {}'.format(school, name, age))\n\nprint('School : {} Name : {} Age : {}'.format('성균과','홍길동',20))\n\n#Using format like this, it seems you can conveniently print\n#integers, strings, etc. that appear repeatedly in a string\n#or would otherwise have to be typed in directly.","sub_path":"Syntax/String3.py","file_name":"String3.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"166458543","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport asteroids.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('asteroids', '0003_auto_20150416_1109'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Attribute',\n fields=[\n ('id', models.CharField(default=asteroids.models.make_uuid, max_length=36, serialize=False, primary_key=True)),\n ('asteroid', models.ForeignKey(related_name=b'attributes', default=b'', blank=True, to='asteroids.Asteroid', 
null=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='AttributeKey',\n fields=[\n ('id', models.CharField(default=asteroids.models.make_uuid, max_length=36, serialize=False, primary_key=True)),\n ('name', models.CharField(max_length=500)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='AttributeKeyValue',\n fields=[\n ('id', models.CharField(default=asteroids.models.make_uuid, max_length=36, serialize=False, primary_key=True)),\n ('value', models.TextField()),\n ('attribute', models.ForeignKey(related_name=b'keys_values', default=b'', blank=True, to='asteroids.Attribute', null=True)),\n ('key', models.ForeignKey(related_name=b'keys_values', default=b'', blank=True, to='asteroids.AttributeKey', null=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"asteroids/migrations/0004_attribute_attributekey_attributekeyvalue.py","file_name":"0004_attribute_attributekey_attributekeyvalue.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"595104945","text":"from . import _impl\n\nDEFAULT_ALPHABET = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890\"\nDEFAULT_SEPARATORS = \"cfhistuCFHISTU\"\n\nclass Hashids(object):\n def __init__(self, salt=\"\", min_length=0, alphabet=DEFAULT_ALPHABET, separators=DEFAULT_SEPARATORS):\n \"\"\" Create a `Hashids` object with the requested `salt` and\n `min_length`. \"\"\"\n assert salt is not None, \"salt is required\"\n assert min_length >= 0, \"min_length must be non-negative\"\n\n self._impl = _impl.Hashids(salt, min_length, alphabet, separators)\n self.salt = salt\n self.min_length = min_length\n self.alphabet = alphabet\n self.separators = separators\n\n def encode(self, *values):\n \"\"\" \"\"\"\n try:\n return self._impl.encode(*values)\n except ValueError as err:\n raise ValueError(\"{}; values={}\".format(str(err), values))\n\n def decode(self, slug):\n \"\"\" \"\"\"\n try:\n if len(slug) != 0:\n return self._impl.decode(slug)\n else:\n return ()\n except ValueError as err:\n raise ValueError(\"{}; slug={}\".format(str(err), slug))","sub_path":"chashids/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"327896178","text":"import numpy as np\nimport cameraCalibration as cc\nimport lineDetection as ld\nimport cv2\n\nclass Line():\n def __init__(self, line_type, n_iter, yvals):\n self.n_iter = n_iter\n self.line_type = line_type\n self.yvals = yvals\n self.bestx = None\n self.best_fit = None\n self.fit = []\n self.radius = None\n self.line_pos = None\n\n def update_line(self, image):\n # Overall line update pipeline\n # Find line px, fit them, get parameters\n coord = self.gen_fit_data(image)\n fit = self.fit_line(coord)\n radius, line_pos = self.curve_and_pos(coord, image.shape)\n # sanity check via fit delta\n if len(self.fit) > 1: \n delta_fit = self.best_fit - fit\n else: delta_fit = np.zeros(3)\n if np.dot(delta_fit, delta_fit) < 9999:\n self.fit.append(fit)\n if len(self.fit) > self.n_iter: self.fit.pop(0)\n self.best_fit = np.average(self.fit, axis=0)\n self.bestx = self.polynomial(self.best_fit)\n self.radius = radius\n self.line_pos = line_pos\n\n def guess_center(self, image):\n # Distribution of line pixels along x-direction\n image_frac=4 # fraction of image to ignore\n offset=100 # px value of 
manual offset if needed\n histogram = np.sum(image[int(image.shape[0] / image_frac):, :], axis=0)\n mid_point = int((histogram.shape[0] - 2 * offset) / 2)\n if self.line_type == 'L':\n index = np.arange(offset, mid_point)\n elif self.line_type == 'R':\n index = np.arange(mid_point,histogram.shape[0] - offset)\n # Calculate position of peak in the histogram using weighted average\n center = np.average(index.astype(int), weights=histogram[index]).astype(int)\n return center\n\n def gen_fit_data(self, image):\n hot_points = []\n peak_width=50 # widest lane line\n nbins=10 # resolution of fit\n Y, X = np.nonzero(image)\n Xy_idx = np.arange(len(Y))\n imsize = image.shape\n bin_size = int(imsize[0] / nbins)\n # Guess approximate location of the line center\n center = self.guess_center(image)\n x_start = center - peak_width\n x_end = center + peak_width\n for nbin in range(nbins):\n index = np.arange(max(0, x_start), min(x_end, imsize[1]))\n # y-direction window start and end\n y_end = imsize[0] - nbin * bin_size\n y_start = y_end - bin_size\n # Distribution of line pixels along x contained in y bin\n histogram = np.sum(image[y_start:y_end, :], axis=0)\n # Calculate line center using weighted average\n try: center = int(np.average(index, weights=histogram[index]))\n except: pass\n # Update scanning window and select points \n x_start = center - peak_width\n x_end = center + peak_width\n idx = Xy_idx[\n (X >= x_start) & (X < x_end) & \\\n (Y >= y_start) & (Y < y_end)]\n hot_points.append(idx)\n # Concatenate hot points found in y-direction scan\n hot_points = np.concatenate(hot_points)\n return X[hot_points], Y[hot_points]\n\n def curve_and_pos(self, position, imsize):\n ym_per_pix = 3 / 70 # m/px y dimension - sketchy estimate\n xm_per_pix = 7 / 1400 # m/px x dimension - seems legit\n yval, xval = imsize\n X, Y = position\n # Convert data from px to m\n fit_cr = np.polyfit(Y * ym_per_pix, X * xm_per_pix, 2)\n curverad = ((1 + (2 * fit_cr[0] * yval * ym_per_pix + fit_cr[1])**2)**1.5) / np.absolute(2 * fit_cr[0])\n abs_line_pos = fit_cr[0] * (yval * ym_per_pix)**2 + fit_cr[1] * yval * ym_per_pix + fit_cr[2]\n line_pos = abs_line_pos - xval * xm_per_pix / 2\n return curverad, line_pos\n\n def fit_line(self, position):\n X, Y = position\n return np.polyfit(Y, X, 2)\n\n def polynomial(self, fit):\n return fit[0] * self.yvals**2 + fit[1] * self.yvals + fit[2]\n\n\nclass Lane(Line):\n \"\"\"\n Uses two line objects (L + R) and provides methods for lane approximation\n \"\"\"\n def __init__(self, mtx, dist, n_iter=10):\n self.yvals = np.linspace(0, 720, 20)\n self.ll = Line('L', n_iter, self.yvals)\n self.rl = Line('R', n_iter, self.yvals)\n self.mtx = mtx\n self.dist = dist\n\n def update_lane(self, image, debug=False):\n undist, warped = self.process_image(image)\n self.ll.update_line(warped)\n self.rl.update_line(warped)\n inv_M = cc.get_transform_matrix(inverse=True)\n result = self.draw_lane(undist, warped, inv_M)\n self.write_curvature_and_position(result)\n return result\n\n def process_image(self, image):\n undist = cv2.undistort(image, self.mtx, self.dist, None, self.mtx)\n imgline = ld.detect_line(undist)\n warped = cc.warp_image(imgline)\n warped[warped > 0] = 1\n return undist, warped\n\n def draw_lane(self, undist, warped, inv_M):\n left_fitx = self.ll.bestx\n right_fitx = self.rl.bestx\n warp_zero = np.zeros_like(warped).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n # Cast x and y into usable format for cv2.fillPoly()\n pts_left = 
np.array([np.transpose(np.vstack([left_fitx, self.yvals]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, self.yvals])))])\n pts = np.hstack((pts_left, pts_right))\n # Draw the lane onto the warped blank image\n cv2.fillPoly(color_warp, np.int32([pts]), (0, 255, 0))\n # reverse warp back to OG perspective\n newwarp = cv2.warpPerspective(color_warp, inv_M, (undist.shape[1], undist.shape[0]))\n return cv2.addWeighted(undist, 1, newwarp, 0.3, 0)\n\n def write_curvature_and_position(self, image):\n # Average radius of the left and right lane\n av_radius = (self.ll.radius + self.rl.radius) / 2.\n # Car position wrt lane center\n dist_center = (self.ll.line_pos + self.rl.line_pos) / 2.\n radius_str = 'Road radius: %d m' % av_radius\n if dist_center >=0:\n dist_center_str = '%.2f m left of center' % dist_center\n elif dist_center <0:\n dist_center_str = '%.2f m right of center' % dist_center\n font = cv2.FONT_HERSHEY_TRIPLEX\n cv2.putText(image, radius_str, (50, 75), font, 1.5, (255, 255, 255), 2)\n cv2.putText(image, dist_center_str, (50, 150), font, 1.5, (255, 255, 255), 2)","sub_path":"laneSegmentation.py","file_name":"laneSegmentation.py","file_ext":"py","file_size_in_byte":6597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"461329175","text":"import os\nfrom fnmatch import fnmatch\n\nfrom mommy_spatial_generators import MOMMY_SPATIAL_FIELDS\nfrom varlet import variable\n\n# Django settings for aol project.\nhere = lambda *path: os.path.normpath(os.path.join(os.path.dirname(__file__), *path))\nROOT = lambda *path: here(\"../\", *path)\n\nTEST_RUNNER = 'aol.testrunner.AOLRunner'\n\nAUTH_USER_MODEL = 'users.User'\n\nLOGIN_URL = '/admin/login'\nLOGOUT_URL = '/admin/logout'\nLOGIN_REDIRECT_URL = '/admin'\nCAS_SERVER_URL = 'https://sso.pdx.edu/cas/login'\n\nDEBUG = variable(\"DEBUG\", default=False)\nTEMPLATE_DEBUG = DEBUG\n# ('Your Name', 'your_email@example.com'),\nADMINS = variable(\"ADMINS\", [])\n\n# ODIN users must be members of one of these groups to login to the admin area\nALLOWED_LOGIN_GROUPS = [\"resgrp116\", \"arc\"]\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.contrib.gis.db.backends.postgis',\n # database name\n 'NAME': variable(\"DB_NAME\", default='aol'),\n # database user. Ensure this user has the createdb privilege if you\n # want to run tests\n 'USER': variable(\"DB_USER\", default='root'),\n 'PASSWORD': variable(\"DB_PASSWORD\", default='vagrant'),\n # the empty string will default to the DB specific default\n 'HOST': variable(\"DB_HOST\", default=\"\"),\n 'PORT': '',\n 'OPTIONS': {\n 'options': '-c search_path=public'\n }\n }\n}\n\n\nLDAP = {\n 'default': {\n 'host': \"ldaps://ldap-bulk.oit.pdx.edu\",\n 'username': 'uid=aolresearch,ou=service,dc=pdx,dc=edu',\n 'password': variable(\"LDAP_PASSWORD\", default=''),\n 'search_dn': 'ou=Group,dc=pdx,dc=edu',\n },\n}\n\n\nSECRET_KEY = variable(\"SECRET_KEY\", default=os.urandom(64).decode(\"latin1\"))\n\nMOMMY_CUSTOM_FIELDS_GEN = MOMMY_SPATIAL_FIELDS\n\n# Hosts/domain names that are valid for this site; required if DEBUG is False\n# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts\nALLOWED_HOSTS = ['.pdx.edu']\n\n# with a trailing slash\nTILE_URL = \"http://gis.rc.pdx.edu/arcgis/rest/services/aol/nlcd/MapServer/\"\n\n\n# allow the use of wildcards in the INTERAL_IPS setting\nclass IPList(list):\n # do a unix-like glob match\n # E.g. 
'192.168.1.100' would match '192.*'\n def __contains__(self, ip):\n for ip_pattern in self:\n if fnmatch(ip, ip_pattern):\n return True\n return False\n\nINTERNAL_IPS = IPList(['10.*', '192.168.*'])\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'US/Pacific'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = False\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = False\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/var/www/example.com/media/\"\nMEDIA_ROOT = ROOT(\"media\")\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://example.com/media/\", \"http://media.example.com/\"\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/var/www/example.com/static/\"\nSTATIC_ROOT = ROOT(\"static\")\n\n# URL prefix for static files.\n# Example: \"http://example.com/static/\", \"http://static.example.com/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n here(\"static\"),\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'djangocas.middleware.CASMiddleware',\n 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'aol.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'aol.wsgi.application'\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n here(\"templates\"),\n)\n\nINSTALLED_APPS = (\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 
'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.flatpages',\n # 'debug_toolbar',\n 'arcutils',\n 'aol.users',\n 'aol.lakes',\n 'aol.documents',\n 'aol.photos',\n 'aol.facilities',\n # Uncomment the next line to enable the admin:\n # 'django.contrib.admin',\n # Uncomment the next line to enable admin documentation:\n # 'django.contrib.admindocs',\n)\n\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.ModelBackend',\n 'aol.users.backends.AOLBackend',\n)\n","sub_path":"aol/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":6228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"162087530","text":"# Chocolates by numbers (see codility.com for more details)\n# Performance 0 %. (The O(N) simulation below times out; the closed form is N // gcd(N, M).)\n\n\ndef solution(N, M):\n A = [element for element in range(N)]\n index, steps = (0, 0)\n while A[index] is not None:\n A[index] = None\n steps += 1\n index += M\n index %= N\n return steps\n","sub_path":"python_solutions/chocolates_by_numbers.py","file_name":"chocolates_by_numbers.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"381660081","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\n\n'''\n\nTakes the student file names and a percentage as arguments,\nbuilds the correct-answer data at the given percentage,\nand writes it to teacher_data\n\n'''\n\ndef make(student_name,df_list,per) :\n\n std_num = len(student_name)\n df_num = len(df_list[0].columns)\n d = []\n f = []\n\n '''\n print(student_name)\n print(df_list[0].values[2])\n print(per)\n '''\n\n for j in range(df_num):\n for k in range(std_num):\n d.append(df_list[k].values[j])\n f.append(np.sum(d,axis = 0))\n d = []\n\n csv_list = []\n for a in range(0,df_num):\n csv_list.append([])\n\n for i in range(df_num):\n for j in range(df_num):\n if f[i][j] > std_num * (per / 100):\n csv_list[i].append(1)\n else :\n csv_list[i].append(0)\n\n '''\n Build the list used for the CSV header\n '''\n list_columns=[]\n for i in range(df_num):\n list_columns.append("C"+str(i))\n\n '''\n Build the index list for the CSV to be created\n '''\n list_index = []\n for i in range(df_num):\n list_index.append("L"+str(i))\n\n df = pd.DataFrame(\n csv_list,\n columns = list_columns,\n index = list_index\n )\n print(df)\n\n df.to_csv('./teacher_data/teacher.csv',encoding="shift-jis")\n","sub_path":"make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"475463095","text":"# https://atcoder.jp/contests/abc148/tasks/abc148_b\n\nn = int(input().strip())\ns, t = map(lambda x: x.strip(), input().split())\n\nans = ""\nfor i in range(n):\n ans += s[i]\n ans += t[i]\n\nprint(ans)\n","sub_path":"beginner_contests/148/B/python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"72782002","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n"""\nCreated on Thu Feb 25 16:52:17 2021\n\n@author: ombretta\n"""\n\nimport json \nimport os \nimport sys\nimport cv2\n\ndef get_video_properties(video_path):\n cap = cv2.VideoCapture(video_path)\n frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = int(cap.get(cv2.CAP_PROP_FPS))\n cap.release()\n return frameCount, frameWidth, frameHeight, fps\n\ndef
get_classes(file):\n with open(file, "r") as f:\n text = f.readlines()\n print(text)\n classes = {}\n for row in text:\n print(row)\n if "," in row:\n classes[row.split(",")[0]] = row.split(",")[1].replace("\\n", "")\n print(classes)\n return classes\n\ndef main(dataset_dir="../../../CV-DataSets/Moments_in_Time_Raw/"):\n \n json_file = os.path.join(dataset_dir, "moments_in_time.json")\n print("dataset folder:", dataset_dir)\n \n # Get classes\n moments_categories_file = dataset_dir+"moments_categories.txt"\n #moments_categories_file = "/Users/ombretta/Desktop/moments_categories.txt" # to delete\n classes = get_classes(moments_categories_file)\n \n \n # Filling the dataset dictionary\n mom_in_time = {}\n mom_in_time['labels'] = list(classes.keys())\n mom_in_time['database'] = {}\n \n # Training\n training_path = dataset_dir+"training/"\n subset = "training"\n \n for video_class in os.listdir(training_path):\n \n if os.path.isdir(os.path.join(training_path, video_class)):\n \n label = video_class\n print(video_class, label)\n \n for v in os.listdir(os.path.join(training_path, video_class)):\n \n video_path = os.path.join(training_path, video_class, v)\n tot_frames, _, _, _ = get_video_properties(video_path)\n segment = [0, tot_frames]\n \n video_name = v.split(".mp4")[0]\n print(video_name, subset, label, tot_frames)\n if tot_frames > 0:\n mom_in_time['database'][video_name] = {}\n mom_in_time['database'][video_name]['subset'] = subset\n mom_in_time['database'][video_name]['annotations'] = {'label': label, 'segment': segment}\n else: \n print("skipped")\n \n # Validation\n validation_path = dataset_dir+"validation/"\n subset = "validation"\n \n for video_class in os.listdir(validation_path):\n \n if os.path.isdir(os.path.join(validation_path, video_class)):\n \n label = video_class\n print(video_class, label)\n \n for v in os.listdir(os.path.join(validation_path, video_class)):\n \n video_path = os.path.join(validation_path, video_class, v)\n tot_frames, _, _, _ = get_video_properties(video_path)\n segment = [0, tot_frames]\n\n video_name = v.split(".mp4")[0] \n print(video_name, subset, label, tot_frames)\n if tot_frames > 0:\n mom_in_time['database'][video_name] = {}\n mom_in_time['database'][video_name]['subset'] = subset\n mom_in_time['database'][video_name]['annotations'] = {'label': label, 'segment': segment}\n else: \n print("skipped")\n \n \n with open(json_file, "w") as file: \n json.dump(mom_in_time, file)\n \n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser(description='Command line options')\n parser.add_argument('--dataset_dir', type=str, dest='dataset_dir')\n args = parser.parse_args(sys.argv[1:])\n main(**{k: v for (k, v) in vars(args).items() if v is not None})\n","sub_path":"create_moments_in_time_json.py","file_name":"create_moments_in_time_json.py","file_ext":"py","file_size_in_byte":3940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"192363678","text":"from turtle import*\r\npen = Pen()\r\n\r\nchoice = 'Y'\r\n\r\nwhile choice == 'Y':\r\n cor = int(input('How many corners does the figure have?\nIf you want the pentagram, enter [0]\n'))\r\n pen.clear()\r\n\r\n if (cor != 0):\r\n for i in range(cor):\r\n pen.forward(100)\r\n pen.right(360/cor)\r\n choice = str(input('Do you want to draw again? Y or N: '))\r\n \r\n else:\r\n for i in range(5):\r\n pen.forward(100)\r\n pen.right(180-(180/5))\r\n choice = str(input('Do you want to draw again? 
Y or N: '))\r\n \r\n","sub_path":"week 6 homework/DrawingByCorners.py","file_name":"DrawingByCorners.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"265419084","text":"from sc2.unit import Unit\nfrom sharpy.plans.acts import ActBase\nfrom sharpy.managers.roles import UnitTask\nfrom sharpy.knowledges import Knowledge\nfrom sc2 import UnitTypeId, AbilityId\n\n\nclass OracleHarass(ActBase):\n def __init__(self):\n super().__init__()\n self.oracle_tag = None\n self.harass_started = False\n self.already_begin_attack = False\n self.reached_position = False\n\n async def start(self, knowledge: Knowledge):\n await super().start(knowledge)\n\n async def execute(self) -> bool:\n oracle = self.knowledge.unit_cache.own(UnitTypeId.ORACLE).ready\n position = self.get_first_oracle_flank_position()\n if oracle.amount >= 1:\n self.knowledge.roles.set_task(UnitTask.Reserved, oracle.first)\n self.oracle_tag = oracle.first.tag\n self.harass_started = True\n\n if self.harass_started:\n harass_oracle: Unit = self.knowledge.unit_cache.by_tag(self.oracle_tag)\n if harass_oracle is not None:\n if not self.reached_position:\n if harass_oracle.distance_to(position) <= 5 and harass_oracle.energy >= 50:\n self.reached_position = True\n elif harass_oracle.shield_percentage >= 0.95:\n enemy_workers = self.knowledge.unit_cache.enemy_in_range(harass_oracle.position3d, 13).of_type(\n [UnitTypeId.SCV, UnitTypeId.PROBE, UnitTypeId.DRONE, UnitTypeId.MULE]\n )\n # worth activate weapon\n if enemy_workers.amount >= 3 and harass_oracle.energy >= 50:\n self.reached_position = True\n else:\n self.oracle_evasive_move_to(position)\n else:\n if self.knowledge.our_zones:\n base = self.knowledge.our_zones[0].behind_mineral_position_center\n self.oracle_evasive_move_to(base)\n else:\n await self.harass_with_oracle()\n return True # never block\n\n async def harass_with_oracle(self):\n harass_oracle: Unit = self.knowledge.unit_cache.by_tag(self.oracle_tag)\n if harass_oracle is not None:\n if not self.already_begin_attack:\n enemy_workers = self.knowledge.unit_cache.enemy_in_range(harass_oracle.position3d, 11).of_type(\n [UnitTypeId.SCV, UnitTypeId.PROBE, UnitTypeId.DRONE, UnitTypeId.MULE]\n )\n # worth activate weapon\n if enemy_workers.amount >= 3 and harass_oracle.energy >= 50:\n self.already_begin_attack = True\n self.do(harass_oracle(AbilityId.BEHAVIOR_PULSARBEAMON))\n return\n if self.already_begin_attack and harass_oracle.energy <= 1:\n self.already_begin_attack = False\n return\n\n if not self.oracle_in_danger() and self.already_begin_attack:\n enemy_workers = self.knowledge.unit_cache.enemy_in_range(harass_oracle.position3d, 11).of_type(\n [UnitTypeId.SCV, UnitTypeId.PROBE, UnitTypeId.DRONE, UnitTypeId.MULE]\n )\n if enemy_workers.exists:\n # try attack the ones that can be one shot killed\n attack_target = None\n for worker in enemy_workers:\n if worker.shield_health_percentage < 0.5:\n attack_target = worker\n break\n if attack_target is None:\n attack_target = enemy_workers.closest_to(harass_oracle)\n self.do(harass_oracle.attack(attack_target))\n else:\n # gather intel\n self.oracle_evasive_move_to(self.knowledge.enemy_expansion_zones[0].behind_mineral_position_center)\n else:\n if harass_oracle.energy <= 2:\n self.do(harass_oracle(AbilityId.BEHAVIOR_PULSARBEAMOFF))\n self.already_begin_attack = False\n self.reached_position = False\n 
self.oracle_evasive_move_to(self.knowledge.enemy_expansion_zones[0].behind_mineral_position_center)\n\n def oracle_in_danger(self):\n harass_oracle: Unit = self.knowledge.unit_cache.by_tag(self.oracle_tag)\n enemy_anti_air_units = self.knowledge.unit_cache.enemy_in_range(harass_oracle.position3d, 11) \\\n .filter(lambda unit: unit.can_attack_air).visible\n enemy_anti_air_structure = self.knowledge.unit_cache.enemy_in_range(harass_oracle.position3d, 11) \\\n .of_type(UnitTypeId.BUNKER)\n for AA in enemy_anti_air_units:\n if AA.position.distance_to(harass_oracle) < AA.air_range + 3:\n return True\n\n for AA in enemy_anti_air_structure:\n if AA.position.distance_to(harass_oracle) < 12:\n return True\n return False\n\n def oracle_evasive_move_to(self, position_to):\n harass_oracle: Unit = self.knowledge.unit_cache.by_tag(self.oracle_tag)\n enemy_anti_air_structure = self.knowledge.unit_cache.enemy_in_range(harass_oracle.position3d, 11) \\\n .of_type(UnitTypeId.BUNKER)\n enemy_anti_air_units = self.knowledge.unit_cache.enemy_in_range(harass_oracle.position3d, 11) \\\n .filter(lambda unit: unit.can_attack_air).visible\n\n if enemy_anti_air_units.exists or enemy_anti_air_structure.exists:\n position = harass_oracle.position3d\n for aa in enemy_anti_air_units:\n distance = harass_oracle.distance_to(aa.position3d)\n if distance > 0:\n amount_of_evade = 15 - distance\n position = position.towards(aa, - amount_of_evade)\n for aa in enemy_anti_air_structure:\n distance = harass_oracle.distance_to(aa.position3d)\n if distance > 0:\n amount_of_evade = 15 - distance\n position = position.towards(aa, - amount_of_evade)\n # after the for loop, position is the best vector away from enemy\n distance_to_best_evade_point = harass_oracle.distance_to(position) * 0.7 + 0.1\n should_go = position.towards(position_to, distance_to_best_evade_point)\n self.do(harass_oracle.move(should_go))\n else:\n self.do(harass_oracle.move(position_to))\n\n def get_first_oracle_flank_position(self):\n distance = 1.3 * self.knowledge.enemy_expansion_zones[1].behind_mineral_position_center. \\\n distance_to(self.knowledge.enemy_expansion_zones[0].center_location)\n return self.knowledge.enemy_expansion_zones[0].center_location. 
\\\n towards(self.knowledge.enemy_expansion_zones[1].behind_mineral_position_center, distance)\n","sub_path":"sharpy-sc2/sharpy/plans/tactics/protoss/oracle_harass.py","file_name":"oracle_harass.py","file_ext":"py","file_size_in_byte":6984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"107107990","text":"from sqlalchemy import Column, UnicodeText\n\nfrom ironbot.modules.sql_helper import BASE, SESSION\n\n\nclass Serverpinger(BASE):\n __tablename__ = \"serverpinger\"\n url = Column(UnicodeText, primary_key=True)\n\n def __init__(self, url):\n self.url = url\n\n\nServerpinger.__table__.create(checkfirst=True)\n\n\ndef add_ping(url):\n pinger = Serverpinger(url)\n SESSION.add(pinger)\n SESSION.commit()\n\n\ndef rmping(url):\n rmpinger = SESSION.query(Serverpinger).get(url)\n if rmpinger:\n SESSION.delete(rmpinger)\n SESSION.commit()\n\n\ndef get_all_url():\n stark = SESSION.query(Serverpinger).all()\n SESSION.close()\n return stark\n\n\ndef is_ping_indb(url):\n try:\n return SESSION.query(Serverpinger).filter(Serverpinger.url == url).one()\n except:\n return None\n finally:\n SESSION.close()\n","sub_path":"ironbot/modules/sql_helper/server_pinger_sql.py","file_name":"server_pinger_sql.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"86788271","text":"import asyncio\nimport itertools\nimport logging\nfrom functools import partial\n\nfrom transitions.core import Condition, Transition, State, MachineError, Machine, Event\n\n\n_LOGGER = logging.getLogger(__name__)\n_LOGGER.addHandler(logging.NullHandler())\n\n\nclass AsyncCondition(Condition):\n \"\"\" A helper class to call async condition checks in the intended way.\"\"\"\n\n async def check(self, event_data):\n \"\"\" Check whether the condition passes.\n Args:\n event_data (EventData): An EventData instance to pass to the\n condition (if event sending is enabled) or to extract arguments\n from (if event sending is disabled). Also contains the data\n model attached to the current machine which is used to invoke\n the condition.\n \"\"\"\n condition_check = self.get_condition_check(event_data)\n if asyncio.iscoroutine(condition_check):\n condition_check = await condition_check\n return condition_check == self.target\n\n\nclass AsyncState(State):\n \"\"\"A persistent representation of a state managed by a ``AsyncMachine``.\"\"\"\n\n async def enter(self, event_data):\n \"\"\" Triggered when a state is entered. \"\"\"\n _LOGGER.debug(\"%sEntering state %s. Processing callbacks...\", event_data.machine.name, self.name)\n for handle in self.on_enter:\n await event_data.machine.callback(handle, event_data)\n _LOGGER.info(\"%sEntered state %s\", event_data.machine.name, self.name)\n\n async def exit(self, event_data):\n \"\"\" Triggered when a state is exited. \"\"\"\n _LOGGER.debug(\"%sExiting state %s. 
Processing callbacks...\", event_data.machine.name, self.name)\n for handle in self.on_exit:\n await event_data.machine.callback(handle, event_data)\n _LOGGER.info(\"%sExited state %s\", event_data.machine.name, self.name)\n\n\nclass AsyncTransitionMixin:\n \"\"\" Representation of a async transition managed by a ``AsyncMachine`` instance.\"\"\"\n\n condition_class = AsyncCondition\n\n async def execute(self, event_data):\n \"\"\" Execute the transition.\n Args:\n event_data: An instance of class EventData.\n Returns: boolean indicating whether or not the transition was\n successfully executed (True if successful, False if not).\n \"\"\"\n _LOGGER.debug(\"%sInitiating transition from state %s to state %s...\",\n event_data.machine.name, self.source, self.dest)\n machine = event_data.machine\n\n for func in self.prepare:\n await machine.callback(func, event_data)\n _LOGGER.debug(\"Executed callback '%s' before conditions.\", func)\n\n for cond in self.conditions:\n if not await cond.check(event_data):\n _LOGGER.debug(\"%sTransition condition failed: %s() does not return %s. Transition halted.\",\n event_data.machine.name, cond.func, cond.target)\n return False\n for func in itertools.chain(machine.before_state_change, self.before):\n await machine.callback(func, event_data)\n _LOGGER.debug(\"%sExecuted callback '%s' before transition.\", event_data.machine.name, func)\n\n if self.dest:\n await self._change_state(event_data)\n\n for func in itertools.chain(self.after, machine.after_state_change):\n await machine.callback(func, event_data)\n _LOGGER.debug(\"%sExecuted callback '%s' after transition.\", event_data.machine.name, func)\n return True\n\n async def _change_state(self, event_data):\n await event_data.machine.get_state(self.source).exit(event_data)\n event_data.machine.set_state(self.dest, event_data.model)\n event_data.update(event_data.model)\n await event_data.machine.get_state(self.dest).enter(event_data)\n\n\nclass AsyncTransition(AsyncTransitionMixin, Transition):\n pass\n\n\nclass AsyncEventMixin:\n \"\"\" A collection of transitions assigned to the same trigger\"\"\"\n\n async def trigger(self, model, *args, **kwargs):\n func = partial(self._trigger, model, *args, **kwargs)\n # pylint: disable=protected-access\n # noinspection PyProtectedMember\n # Machine._process should not be called somewhere else. That's why it should not be exposed\n # to Machine users.\n return await self.machine._process(func)\n\n async def _trigger(self, model, *args, **kwargs):\n \"\"\" Internal trigger function called by the ``Machine`` instance. 
This should not\n be called directly but via the public method ``Machine.trigger``.\n \"\"\"\n event_data = self._prepare_event_data(model, *args, **kwargs)\n if event_data:\n return await self._process(event_data)\n else:\n return False\n\n async def _process(self, event_data):\n for func in self.machine.prepare_event:\n await self.machine.callback(func, event_data)\n _LOGGER.debug(\"Executed machine preparation callback '%s' before conditions.\", func)\n\n try:\n for trans in self.transitions[event_data.state.name]:\n event_data.transition = trans\n transition_result = await trans.execute(event_data)\n\n if transition_result:\n event_data.result = True\n break\n except Exception as e:\n event_data.error = e\n raise\n finally:\n for func in self.machine.finalize_event:\n await self.machine.callback(func, event_data)\n _LOGGER.debug(\"Executed machine finalize callback '%s'.\", func)\n return event_data.result\n\n\nclass AsyncEvent(AsyncEventMixin, Event):\n pass\n\n\nclass AsyncMachineMixin:\n\n async def callback(self, func, event_data):\n\n callback = super().callback(func, event_data)\n if asyncio.iscoroutine(callback):\n await callback\n\n async def _process(self, trigger):\n # default processing\n if not self.has_queue:\n if not self._transition_queue:\n # if trigger raises an Error, it has to be handled by the Machine.process caller\n return await trigger()\n else:\n raise MachineError(\n \"Attempt to process events synchronously while transition queue is not empty!\"\n )\n\n # process queued events\n self._transition_queue.append(trigger)\n # another entry in the queue implies a running transition; skip immediate execution\n if len(self._transition_queue) > 1:\n return True\n\n # execute as long as transition queue is not empty\n while self._transition_queue:\n try:\n callback = self._transition_queue[0]()\n\n if asyncio.iscoroutine(callback):\n await callback\n\n self._transition_queue.popleft()\n except Exception:\n # if a transition raises an exception, clear queue and delegate exception handling\n self._transition_queue.clear()\n raise\n return True\n\n async def dispatch(self, trigger, *args, **kwargs):\n \"\"\" Trigger an event on all models assigned to the machine.\n Args:\n trigger (str): Event name\n *args (list): List of arguments passed to the event trigger\n **kwargs (dict): Dictionary of keyword arguments passed to the event trigger\n Returns:\n bool The truth value of all triggers combined with AND\n \"\"\"\n return all([await getattr(model, trigger)(*args, **kwargs) for model in self.models])\n\n\nclass AsyncMachine(AsyncMachineMixin, Machine):\n \"\"\" Machine manages states, transitions and models. In case it is initialized without a specific model\n (or specifically no model), it will also act as a model itself. 
Machine takes also care of decorating\n models with conveniences functions related to added transitions and states during runtime.\"\"\"\n\n state_cls = AsyncState\n transition_cls = AsyncTransition\n event_cls = AsyncEvent\n","sub_path":"transitions/aio/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":8135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"22135341","text":"\"\"\"\nDemonstrate the recursive binary search algorithm.\nauthor: Sean Strout (sps@cs.rit.edu)\ncontributor: Trudy Howles tmh\ncontributor: ben k steele bks\ncontributor: Arthur Nunes-Harwitt anh\nModification History:\n10/8/2010 -- lecture content reordering\n01/13/2011 -- docstring content revisions\n08/28/2012 -- Sentinel changed to None\n\"\"\"\n\n\ndef binary_search(data, target, start, end):\n \"\"\"\n binary_search : List(Orderable) Orderable NatNum NatNum -> NatNum or NoneType\n Perform a binary search for a target value between start and end indices.\n Parameters:\n data - a list of sorted data\n target - the target value to search for\n start - the starting index in data that is part of this search\n end - the ending index in data that is part of this search\n Returns:\n index of target in data, if present; otherwise None.\n \"\"\"\n \n # base condition - terminate when start passes end index\n if start > end:\n return None\n \n # find the middle value between start and end indices\n mid_index = (start + end) // 2\n mid_value = data[mid_index]\n \n # debug statement prints the data list\n print(\"Searching for\", target, \":\", data[start:mid_index],\n \"*\" + str(mid_value) + \"*\",\n data[mid_index + 1:end + 1])\n \n if target == mid_value:\n return mid_index\n elif target < mid_value:\n return binary_search(data, target, start, mid_index - 1)\n else:\n return binary_search(data, target, mid_index + 1, end)\n\n\ndef get_index(data, target):\n \"\"\"\n get_index : List(Orderable) Orderable -> NatNum or NoneType\n get_index returns the index of target in data or None if not target found.\n Parameters:\n data - a list of sorted data\n target - the target value to search for\n Returns:\n The index of the target element in data, if it is present,\n otherwise None.\n \"\"\"\n \n # search for the target across all elements in data\n return binary_search(data, target, 0, len(data) - 1)\n\n\ndef main():\n \"\"\"\n main : Void -> None\n main creates an ordered list of Integers based on user parameters and\n allows the user to 'binary search' for values.\n \"\"\"\n \n print(\"Step 1 - Create your sorted data...\")\n start = int(input(\"Start: \"))\n stop = int(input(\"Stop: \"))\n step = int(input(\"Step: \"))\n \n data = []\n for loop in range(start, stop, step):\n data += [loop]\n \n print(\"\\nData: \", data)\n print(\"Number of elements: \", len(data))\n \n print(\"\\nStep 2 - Enter target value to search for...\")\n target = int(input(\"Target: \"))\n \n print(\"\\nStep 3 - Get index of target value in data (None if doesn't exist)...\")\n index = get_index(data, target)\n print()\n if index != None:\n print(target, \"found at index\", index)\n else:\n print(target, \"not found\")\n\n\n# # run program\nmain()\n","sub_path":"Libraries/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"154525801","text":"import csv\nimport sqlite3\n\nconn = sqlite3.connect('people.db')\nf = 
open('filtereddata.csv')\ncsv_f = csv.reader(f)\nc = conn.cursor()\n# Create table\nc.execute('''CREATE TABLE people\n (post_id text, subject text, details text, created_at text, platform text, question_tags text, state text, reply text, replied text, click_count integer, marriage_status text, employment_status text)''')\n\nfor row in csv_f:\n text = row[2]\n marriage_status = ""\n employment_status = ""\n if "husband" in text or "wife" in text or "married" in text or "spouse" in text:\n marriage_status = "married"\n if "single mom" in text or "single dad" in text or "divorced" in text or "single parent" in text:\n marriage_status = "divorced"\n if "unemployed" in text:\n employment_status = "unemployed"\n else:\n employment_status = "employed"\n # Insert one row of data per CSV row\n c.execute('''INSERT INTO people(post_id, subject, details, created_at,\n platform, question_tags, state, reply, replied, click_count, marriage_status, employment_status)\n VALUES(:post_id,:subject, :details, :created_at, :platform, :question_tags, :state,\n :reply, :replied, :click_count, :marriage_status, :employment_status)''',\n {'post_id':row[0], 'subject':row[1], 'details':row[2], 'created_at':row[3],\n 'platform':row[4], 'question_tags':row[5], 'state':row[6], 'reply':row[7],\n 'replied':row[8], 'click_count':0, 'marriage_status':marriage_status,\n 'employment_status':employment_status})\n\n# Save (commit) the changes\nconn.commit()\n# We can also close the connection if we are done with it.\n# Just be sure any changes have been committed or they will be lost.\nconn.close()\n","sub_path":"python/read_data.py","file_name":"read_data.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"1234319","text":"import fnmatch\nimport os, glob\nfrom Bio import SeqIO\nimport sys, traceback\nimport argparse\n\ndef rename_fasta(infile, tagfile, outfile, newtagfile):\n counters = {}\n tags = {}\n newseqs=[]\n \n t = open(tagfile,'r')\n tn = open(newtagfile,'w')\n \n for line in t:\n data = line.strip().split('\\t')\n tags[data[0]] = data\n \n seqs = SeqIO.parse(infile, 'fasta')\n for s in seqs:\n data = tags[s.id]\n counter = counters.get(data[2], 0)\n counter = counter + 1\n idtag = "%s|%06d"%(data[2], counter)\n counters[data[2]]=counter\n s.id = idtag\n s.description = ""\n tn.write(idtag)\n tn.write('\\t')\n tn.write('\\t'.join(data))\n tn.write('\\n')\n newseqs.append(s)\n tn.close()\n \n try:\n SeqIO.write((newseqs), outfile, 'fasta')\n except Exception:\n print("Exception in user code:")\n print("-"*60)\n traceback.print_exc(file=sys.stdout)\n print("-"*60)\n\n\nif __name__=='__main__':\n \n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--fasta', dest = 'fasta', help = 'fasta input file', required = True)\n parser.add_argument('-t', '--tags', dest = 'tags', help = 'tags', required = True)\n parser.add_argument('-o', '--outfile', dest = 'outfile', help = 'outfile', required = True)\n parser.add_argument('-n', '--newtags', dest = 'newtags', help = 'tags', required = True)\n \n args = parser.parse_args()\n rename_fasta(args.fasta, args.tags, args.outfile, args.newtags)\n ","sub_path":"comp genomics pipeline/src/rename_proteins.py","file_name":"rename_proteins.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"160949643","text":"import math, sys\nclass 
Solution(object):\n def rangeBitwiseAnd(self, m, n):\n \"\"\"\n :type m: int\n :type n: int\n :rtype: int\n \"\"\"\n if m == n:\n return m\n gap = n - m\n bits = int(math.log(gap,2)) # how many bits have both '0' and '1'\n mask = 1\n while bits > 0:\n mask <<= 1\n mask += 1\n bits -= 1\n return m&n&(sys.maxint - mask) # m&n because the num between m and n will not change the result of m&n\n","sub_path":"Python/RangeBitAnd.py","file_name":"RangeBitAnd.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"346775377","text":"import re\nimport sys\n\nimport matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport wgdi.base as base\n\n\nclass align_dotplot():\n def __init__(self, options):\n self.position = 'order'\n self.figsize = 'default'\n self.classid = 'class1'\n for k, v in options:\n setattr(self, str(k), v)\n print(str(k), ' = ', v)\n if hasattr(self, 'ks_area'):\n self.ks_area = [float(k) for k in self.ks_area.split(',')]\n else:\n self.ks_area = [-1, 3]\n if hasattr(self, 'colors'):\n self.colors = [str(k) for k in self.colors.split(',')]\n else:\n self.colors = ['red', 'blue', 'green', 'black', 'orange']\n if not hasattr(self, 'blockinfo_reverse'):\n self.blockinfo_reverse = 'false'\n\n def pair_positon(self, alignment, loc1, loc2, colors):\n alignment.index = alignment.index.map(loc1)\n data, i = [], 0\n for k in alignment.columns:\n df = alignment[k].map(loc2)\n df.dropna(axis=0, how='any', inplace=True)\n for index, row in df.iteritems():\n data.append([index, row, colors[i]])\n i += 1\n df = pd.DataFrame(data, columns=['loc1', 'loc2', 'color'])\n return df\n\n def run(self):\n axis = [0, 1, 1, 0]\n lens1 = base.newlens(self.lens1, self.position)\n lens2 = base.newlens(self.lens2, self.position)\n if re.search('\\d', self.figsize):\n self.figsize = [float(k) for k in self.figsize.split(',')]\n else:\n self.figsize = np.array(\n [1, float(lens1.sum())/float(lens2.sum())])*10\n plt.rcParams['ytick.major.pad'] = 0\n fig, ax = plt.subplots(figsize=self.figsize)\n ax.xaxis.set_ticks_position('top')\n step1 = 1 / float(lens1.sum())\n step2 = 1 / float(lens2.sum())\n base.dotplot_frame(fig, ax, lens1, lens2, step1, step2,\n self.genome1_name, self.genome2_name, [0, 1])\n gff1 = base.newgff(self.gff1)\n gff2 = base.newgff(self.gff2)\n gff1 = base.gene_location(gff1, lens1, step1, self.position)\n gff2 = base.gene_location(gff2, lens2, step2, self.position)\n bkinfo = pd.read_csv(self.blockinfo, index_col='id')\n if self.blockinfo_reverse == True or self.blockinfo_reverse.upper() == 'TRUE':\n bkinfo[['chr1', 'chr2']] = bkinfo[['chr2', 'chr1']]\n bkinfo[['block1', 'block2']] = bkinfo[['block2', 'block1']]\n bkinfo['chr1'] = bkinfo['chr1'].astype(str)\n bkinfo['chr2'] = bkinfo['chr2'].astype(str)\n bkinfo[self.classid] = bkinfo[self.classid].astype(str)\n bkinfo=bkinfo[bkinfo['chr1'].isin(lens1.index) & (bkinfo['chr2'].isin(lens2.index))]\n align = self.alignment(gff1, gff2, bkinfo)\n alignment = align[gff1.columns[-int(\n len(bkinfo[self.classid].drop_duplicates())):]]\n alignment.to_csv(self.savefile, header=None)\n df = self.pair_positon(\n alignment, gff1['loc'], gff2['loc'], self.colors)\n plt.scatter(df['loc2'], df['loc1'], s=float(self.markersize), c=df['color'],\n alpha=0.5, edgecolors=None, linewidths=0, marker='o')\n ax.axis(axis)\n plt.subplots_adjust(left=0.07, right=0.97, top=0.93, bottom=0.03)\n plt.savefig(self.savefig, 
dpi=500)\n plt.show()\n sys.exit(0)\n\n def alignment(self, gff1, gff2, bkinfo):\n for cl, group in bkinfo.groupby([self.classid]):\n name = 'l'+cl\n gff1[name] = np.nan\n group = group.sort_values(by=['length'], ascending=[True])\n for index, row in group.iterrows():\n b1 = row['block1'].split('_')\n b2 = row['block2'].split('_')\n ks = row['ks'].split('_')\n ks = list(map(float, ks))\n block1, block2 = [], []\n for i in range(len(ks)):\n if self.ks_area[0] <= ks[i] <= self.ks_area[1]:\n block1.append(int(b1[i]))\n block2.append(int(b2[i]))\n block1 = list(map(int, block1))\n block2 = list(map(int, block2))\n area = gff1[(gff1['chr'] == row['chr1']) & (\n gff1['order'] >= min(block1)) & (gff1['order'] <= max(block1))].index\n index1 = gff1[(gff1['chr'] == row['chr1']) & (gff1['order'].isin(\n block1))].sort_values(by=['order'], key=lambda x: block1).index\n index2 = gff2[(gff2['chr'] == row['chr2']) & (gff2['order'].isin(\n block2))].sort_values(by=['order'], key=lambda x: block2).index\n gff1.loc[index1, name] = index2\n gff1.loc[gff1.index.isin(area) & gff1[name].isna(), name] = '.'\n return gff1\n","sub_path":"wgdi/align_dotplot.py","file_name":"align_dotplot.py","file_ext":"py","file_size_in_byte":4896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"213572193","text":"from lib.dataset.dataietr import FaceKeypointDataIter\nfrom train_config import config\nfrom lib.core.api.keypoint import Keypoints\nimport numpy as np\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nimport cv2\nfrom train_config import config as cfg\ncfg.TRAIN.batch_size=1\n\nval_ds = FaceKeypointDataIter(cfg.DATA.root_path,cfg.DATA.val_txt_path,False)\n\nface=Keypoints('./model/keypoints.pb')\n\n\nfor one_ele,_, in val_ds:\n print(_)\n\n img_show=np.array(one_ele)\n res=face.simple_run(one_ele)\n #print(res)\n res=res[0][:136].reshape((-1,2))\n img_show=img_show.astype(np.uint8)\n\n img_show=cv2.cvtColor(img_show, cv2.COLOR_BGR2RGB)\n\n for _index in range(res.shape[0]):\n x_y = res[_index]\n cv2.circle(img_show, center=(int(x_y[0] * config.MODEL.hin),\n int(x_y[1] * config.MODEL.win)),\n color=(255, 122, 122), radius=1, thickness=2)\n\n cv2.imshow('tmp',img_show)\n cv2.waitKey(0)\n","sub_path":"vis.py","file_name":"vis.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"225168633","text":"from typing import List\nfrom . 
import utils\nfrom .models.api_types import AddressBalance, Block, Transaction, TransactionInput, TransactionOutput, TxUnspent\nimport http3\nimport json\n\n\nasync def get_account(client_url: str, address: str) -> AddressBalance:\n \"\"\"Get account from address\n\n :param client_url: The haskoin API url\n :type client_url: str\n :param address: The BCH address\n :type address: str\n :returns: AddressBalance\n \"\"\"\n try:\n api_url = f'{client_url}/address/{address}/balance'\n\n client = http3.AsyncClient()\n response = await client.get(api_url)\n\n if response.status_code == 200:\n balance_response = json.loads(response.content.decode('utf-8'))\n if \"error\" in balance_response:\n raise Exception(\n f'Error is : {balance_response[\"error\"]}\\nMessage is : {balance_response[\"message\"]}')\n else:\n result = AddressBalance(balance_response['received'], balance_response['utxo'], balance_response['address'],\n balance_response['txs'], balance_response['unconfirmed'], balance_response['confirmed'])\n return result\n else:\n return None\n except Exception as err:\n raise Exception(str(err))\n\n\nasync def get_transaction(client_url: str, tx_id: str) -> Transaction:\n \"\"\"Get transaction by hash\n\n :param client_url: The haskoin API url\n :type client_url: str\n :param tx_id: The transaction id\n :type tx_id: str\n :returns: Transaction info\n :raises: 'failed to query transaction by a given hash' if failed to query transaction by a given hash\n \"\"\"\n try:\n api_url = f'{client_url}/transaction/{tx_id}'\n\n client = http3.AsyncClient()\n response = await client.get(api_url)\n\n if response.status_code == 200:\n balance_response = json.loads(response.content.decode('utf-8'))\n\n inputs = []\n for i in balance_response['inputs']:\n inputs.append(TransactionInput(i[\"pkscript\"], i['value'], i['address'], i['witness'],\n i['sequence'], i['output'], i['sigscript'], i['coinbase'], i['txid']))\n outputs = []\n for i in balance_response['outputs']:\n outputs.append(TransactionOutput(\n i[\"spent\"], i['pkscript'], i['value'], i['address'], i['spender']))\n\n result = Transaction(balance_response[\"time\"], balance_response[\"size\"], inputs, balance_response[\"weight\"], balance_response[\"fee\"], balance_response[\"locktime\"],\n balance_response[\"block\"], outputs, balance_response[\"version\"], balance_response[\"deleted\"], balance_response[\"rbf\"], balance_response[\"txid\"])\n\n return result\n else:\n raise Exception('failed to query transaction by a given hash')\n except Exception as err:\n raise Exception(str(err))\n\n\nasync def get_suggested_tx_fee():\n \"\"\"Get suggested fee amount for Bitcoin cash. 
(fee per byte)\n\n Note: Haskcoin does not provide fee rate related data\n So use Bitgo API for fee estimation\n Refer: https://app.bitgo.com/docs/#operation/v2.tx.getfeeestimate\n\n :returns: The Bitcoin cash stats\n \"\"\"\n try:\n api_url = 'https://app.bitgo.com/api/v2/bch/tx/fee'\n\n client = http3.AsyncClient()\n response = await client.get(api_url)\n\n if response.status_code == 200:\n fee_response = json.loads(response.content.decode('utf-8'))\n return fee_response[\"feePerKb\"] / 1000 # feePerKb to feePerByte\n else:\n fee_response = json.loads(response.content.decode('utf-8'))\n if \"error\" in fee_response:\n raise Exception(f'Error is : {fee_response[\"error\"]}')\n\n return utils.DEFAULT_SUGGESTED_TRANSACTION_FEE\n except:\n return utils.DEFAULT_SUGGESTED_TRANSACTION_FEE\n\nasync def get_unspent_transactions(client_url , address) -> List[TxUnspent]:\n \"\"\"Get unspent transactions\n\n :param client_url: The haskoin API url\n :type client_url: str\n :param address: The BCH address\n :type address: str\n :returns: The Bitcoin cash stats\n :raises: 'failed to query unspent transactions' if failed to query unspent transactions\n \"\"\"\n try:\n account = await get_account(client_url , address)\n\n api_url = f'{client_url}/address/{address}/unspent?limit={account.txs}'\n\n client = http3.AsyncClient()\n response = await client.get(api_url)\n\n if response.status_code == 200:\n tx_response = json.loads(response.content.decode('utf-8'))\n result = [TxUnspent(i['pkscript'],i['value'],i['address'],Block(i['block']['height'] , i['block']['position']) ,i['index'],i['txid']) for i in tx_response]\n return result\n else:\n raise Exception('failed to query unspent transactions')\n except Exception as err:\n raise Exception(str(err))\n\n# async def broadcast_tx(client_url, tx_hex):\n# \"\"\"Broadcast transaction\n# https://sochain.com/api#send-transaction\n\n# :param client_url: The haskoin API url\n# :type client_url: str\n# :param tx_hex: tranaction hex\n# :type tx_hex: str\n# :returns: Transaction ID\n# \"\"\"\n# try:\n# api_url = f'{client_url}/transactions'\n\n# client = http3.AsyncClient()\n# response = await client.post(url=api_url, data=tx_hex)\n\n# if response.status_code == 200:\n# res = json.loads(response.content.decode('utf-8'))['data']\n# return res['txid']\n# else:\n# return json.loads(response.content.decode('utf-8'))['data']\n# except Exception as err:\n# raise Exception(str(err))","sub_path":"xchainpy/xchainpy_bitcoincash/xchainpy_bitcoincash/haskoin_api.py","file_name":"haskoin_api.py","file_ext":"py","file_size_in_byte":5798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"470639417","text":"from pathlib import Path\n\ndef getEntity(id):\n #return json\n return\n\ndef create_childCategories_table(csv):\n table = []\n data = open(Path(csv))\n content = data.read()\n inputdata = content.split('\\n')\n for i in range(len(inputdata)):\n temp = inputdata[i].split(';')\n if temp[2] != '\\\\N':\n new_row = [temp[0], temp[1], temp[2], temp[3], 0]\n table.append(new_row)\n return table\n\ndef create_parentCategories_table(csv):\n table = []\n data = open(Path(csv))\n content = data.read()\n inputdata = content.split('\\n')\n for i in range(len(inputdata)):\n temp = inputdata[i].split(';')\n if temp[2] == '\\\\N':\n new_row = [temp[0], temp[1], 0]\n table.append(new_row)\n return table\n\ndef get_preferences(list, csv, tours = 0):\n child_categories = create_childCategories_table(csv)\n parent_categories = 
create_parentCategories_table(csv)\n\n # NUMBER OF DIFFICULTIES (PROPERTY ONLY APPLIES TO TOURS)\n if tours == 1:\n schwierigkeiten = [0, 0, 0]\n\n for i in range(len(list)):\n entity = []\n #entity = getEntity(list[i][7])\n\n # COUNT THE DIFFICULTIES\n if tours == 1:\n if entity['data']['difficulty'] == 1:\n schwierigkeiten[0] = schwierigkeiten[0] + 1\n elif entity['data']['difficulty'] == 2:\n schwierigkeiten[1] = schwierigkeiten[1] + 1\n elif entity['data']['difficulty'] == 3:\n schwierigkeiten[2] = schwierigkeiten[2] + 1\n\n # COUNT THE CHILD CATEGORIES\n for j in range(len(child_categories)):\n if entity['data']['types']['data']['id'] == child_categories[j][0]:\n child_categories[j][4] = child_categories[j][4] + 1\n break\n\n # DETERMINE THE MOST FREQUENT DIFFICULTY\n if tours == 1:\n schwierigkeit = ['Schwierigkeit', 0]\n schwierigkeiten_max = 0\n for i in range(len(schwierigkeiten)):\n if schwierigkeiten[i] > schwierigkeiten_max:\n schwierigkeiten_max = schwierigkeiten[i]\n schwierigkeit[1] = i + 1\n\n # COUNT THE PARENT CATEGORIES\n for i in range(len(child_categories)):\n for j in range(len(parent_categories)):\n if child_categories[i][2] == parent_categories[j][0]:\n parent_categories[j][2] = parent_categories[j][2] + 1\n break\n\n if tours == 1:\n parent_categories.append(schwierigkeit)\n return parent_categories\n\ndef create_preferences(events, places, activities, tours, articles = 0):\n Natur = 0\n Kultur_und_Unterhaltung = 0\n Historie_und_Sehenswuerdigkeiten = 0\n Entspannung = 0\n Genuss = 0\n Sport = 0\n Shopping = 0\n\n #EVENTS\n for i in range(len(events)):\n if events[i][1] == 'Musik & Konzerte':\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + events[i][2]\n elif events[i][1] == 'Tanz':\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + events[i][2]\n elif events[i][1] == 'Theater':\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + events[i][2]\n elif events[i][1] == 'Literatur':\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + events[i][2]\n Entspannung = Entspannung + events[i][2]\n elif events[i][1] == 'Austellungen':\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + events[i][2]\n elif events[i][1] == 'Film':\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + events[i][2]\n Entspannung = Entspannung + events[i][2]\n elif events[i][1] == 'Feste':\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + events[i][2]\n elif events[i][1] == 'Nightlife':\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + events[i][2]\n elif events[i][1] == 'Essen & Trinken':\n Genuss = Genuss + events[i][2]\n elif events[i][1] == 'Führung':\n Historie_und_Sehenswuerdigkeiten = Historie_und_Sehenswuerdigkeiten + events[i][2]\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + events[i][2]\n elif events[i][1] == 'Vorträge & Tagungen':\n Historie_und_Sehenswuerdigkeiten = Historie_und_Sehenswuerdigkeiten + events[i][2]\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + events[i][2]\n elif events[i][1] == 'Kurse & Workshops':\n Entspannung = Entspannung + events[i][2]\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + events[i][2]\n elif events[i][1] == 'Sport & Freizeit':\n Sport = Sport + events[i][2]\n Natur = Natur + events[i][2]\n elif events[i][1] == 'Märkte & Messen':\n Shopping = Shopping + events[i][2]\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + events[i][2]\n elif events[i][1] == 'Eventserie':\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + events[i][2]\n #elif events[i][1] == 'Sonstiges':\n\n #ACTIVITIES\n for i in range(len(activities)):\n if activities[i][1] == 
'Ski & Snowboard':\n Sport = Sport + activities[i][2]\n Natur = Natur + activities[i][2]\n elif activities[i][1] == 'Radsport':\n Sport = Sport + activities[i][2]\n Natur = Natur + activities[i][2]\n elif activities[i][1] == 'Boot - & Wassersport':\n Sport = Sport + activities[i][2]\n Natur = Natur + activities[i][2]\n elif activities[i][1] == 'Tauchen & Schwimmen':\n Sport = Sport + activities[i][2]\n Entspannung = Entspannung + activities[i][2]\n elif activities[i][1] == 'Luftsport':\n Sport = Sport + activities[i][2]\n Natur = Natur + activities[i][2]\n elif activities[i][1] == 'Kraft & Fitness':\n Sport = Sport + activities[i][2]\n elif activities[i][1] == 'Ballsport':\n Sport = Sport + activities[i][2]\n elif activities[i][1] == 'Motorsport':\n Sport = Sport + activities[i][2]\n elif activities[i][1] == 'Berge & Co.':\n Natur = Natur + activities[i][2]\n elif activities[i][1] == 'Handwerk':\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + activities[i][2]\n elif activities[i][1] == 'Ausgefallenes':\n Sport = Sport + activities[i][2]\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + activities[i][2]\n\n #TOURS\n for i in range(len(tours)):\n if tours[i][1] == 'Radtouren':\n Sport = Sport + tours[i][2]\n Natur = Natur + tours[i][2]\n elif tours[i][1] == 'Wandertouren':\n Sport = Sport + tours[i][2]\n Natur = Natur + tours[i][2]\n elif tours[i][1] == 'Klettertouren':\n Sport = Sport + tours[i][2]\n Natur = Natur + tours[i][2]\n elif tours[i][1] == 'Wintertouren':\n Sport = Sport + tours[i][2]\n Natur = Natur + tours[i][2]\n elif tours[i][1] == 'Spaziergänge':\n Entspannung = Entspannung + tours[i][2]\n Natur = Natur + tours[i][2]\n elif tours[i][1] == 'Laufen':\n Sport = Sport + tours[i][2]\n Natur = Natur + tours[i][2]\n\n # Redundancy: 30/32 Supermarkt und Kaufhaus\n #PLACES\n for i in range(len(places)):\n if places[i][1] == 'Baugewerbe':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Beherbergung & Übernachtung':\n Entspannung = Entspannung + places[i][2]\n elif places[i][1] == 'Beratung':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Einzelhandel':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Elektronik':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Finanzen, Recht & Beratung':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Foto, Video & Druck':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Freizeit':\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + places[i][2]\n elif places[i][1] == 'Gastronomie':\n Genuss = Genuss + places[i][2]\n elif places[i][1] == 'Gesundheit':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Handwerk':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Haushalt & Möbel':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Hobby & Spiel':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'IT - Dienstleistungen':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'KFZ':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Körper & Pflege':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Kunst, Sammeln & Antiquität':\n Shopping = Shopping + places[i][2]\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + places[i][2]\n elif places[i][1] == 'Lebensmittel':\n Shopping = Shopping + places[i][2]\n Genuss = Genuss + places[i][2]\n elif places[i][1] == 'Lesen & Schreiben':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Lieferdienste':\n Shopping = Shopping + places[i][2]\n Genuss = Genuss + 
places[i][2]\n elif places[i][1] == 'Lokales':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Geschäft':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Mode':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Musik, Film & Videospiele':\n Shopping = Shopping + places[i][2]\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + places[i][2]\n elif places[i][1] == 'Schule, Unterricht & Nachhilfe':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Produktion & Fertigung':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Schmuck & Accessoires':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Sehenswürdigkeit':\n Historie_und_Sehenswuerdigkeiten = Historie_und_Sehenswuerdigkeiten + places[i][2]\n elif places[i][1] == 'Sicherheits - Services':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Sport':\n Sport = Sport + places[i][2]\n elif places[i][1] == 'Supermarkt & Kaufhaus':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Textil - Services':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Tiere & Tierbedarf':\n Shopping = Shopping + places[i][2]\n Natur = Natur + places[i][2]\n elif places[i][1] == 'Transport & Reisen':\n Shopping = Shopping + places[i][2]\n Natur = Natur + places[i][2]\n elif places[i][1] == 'Trauer':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Unterhaltung':\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + places[i][2]\n elif places[i][1] == 'Veranstaltungsdienstleistungen':\n Shopping = Shopping + places[i][2]\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + places[i][2]\n elif places[i][1] == 'Vereine & Verbände':\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + places[i][2]\n elif places[i][1] == 'Vermittlung':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Werbung & Medien':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Wissenschaft & Forschung':\n Historie_und_Sehenswuerdigkeiten = Historie_und_Sehenswuerdigkeiten + places[i][2]\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + places[i][2]\n Entspannung = Entspannung + places[i][2]\n elif places[i][1] == 'Wohnen, Garten & Werken':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Tourismus - Hilfen':\n Entspannung = Entspannung + places[i][2]\n elif places[i][1] == 'Speisen & Getränke':\n Genuss = Genuss + places[i][2]\n elif places[i][1] == 'Vermietungen':\n Shopping = Shopping + places[i][2]\n Sport = Sport + places[i][2]\n elif places[i][1] == 'Holz':\n Shopping = Shopping + places[i][2]\n Historie_und_Sehenswuerdigkeiten = Historie_und_Sehenswuerdigkeiten + places[i][2]\n elif places[i][1] == 'Installation & Facility':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 'Körperpflege & Dienstleistungen':\n Shopping = Shopping + places[i][2]\n Entspannung = Entspannung + places[i][2]\n elif places[i][1] == 'Kunsthandwerk':\n Shopping = Shopping + places[i][2]\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + places[i][2]\n elif places[i][1] == 'Medien, Design & IT':\n Shopping = Shopping + places[i][2]\n Kultur_und_Unterhaltung = Kultur_und_Unterhaltung + places[i][2]\n elif places[i][1] == 'Metall':\n Shopping = Shopping + places[i][2]\n Historie_und_Sehenswuerdigkeiten = Historie_und_Sehenswuerdigkeiten + places[i][2]\n elif places[i][1] == 'Nahrungsmittel':\n Shopping = Shopping + places[i][2]\n Genuss = Genuss + places[i][2]\n elif places[i][1] == 'Textil':\n Shopping = Shopping + places[i][2]\n elif places[i][1] == 
'Transport':\n Shopping = Shopping + places[i][2]\n #elif places[i][1] == 'Unbekannt':\n\n Summe = Natur + Kultur_und_Unterhaltung + Historie_und_Sehenswuerdigkeiten + Entspannung + Genuss + Sport + Shopping\n preferences = [Summe, Natur, Kultur_und_Unterhaltung, Historie_und_Sehenswuerdigkeiten, Entspannung, Genuss, Sport, Shopping]\n return preferences\n","sub_path":"bin/preferences.py","file_name":"preferences.py","file_ext":"py","file_size_in_byte":13948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"270098826","text":"# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Trains the N-styles style transfer model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport ast\nimport sys\nimport os\n\n# This is needed since the notebook is stored in the object_detection folder.\nTF_API=\"/home/ubuntu/eclipse-workspace/Github/magenta/magenta/models/image_stylization\"\nsys.path.append(os.path.split(TF_API)[0])\nsys.path.append(TF_API)\n\n# internal imports\nimport tensorflow as tf\n\nfrom image_stylization import image_utils\nfrom image_stylization import learning\nfrom image_stylization import model\nfrom image_stylization import vgg\n'''\npython image_stylization_train.py \\\n --train_dir=./tmp/image_stylization/run1/train \\\n --style_dataset_file=./tmp/image_stylization/style_images.tfrecord \\\n --num_styles=7 \\\n --vgg_checkpoint=pretrained/vgg_16.ckpt \\\n --imagenet_data_dir=imagenet-data/tfrecord\n'''\nslim = tf.contrib.slim\n\nDEFAULT_CONTENT_WEIGHTS = '{\"vgg_16/conv3\": 1.0}'\nDEFAULT_STYLE_WEIGHTS = ('{\"vgg_16/conv1\": 1e-4, \"vgg_16/conv2\": 1e-4,'\n ' \"vgg_16/conv3\": 1e-4, \"vgg_16/conv4\": 1e-4}')\n\nflags = tf.app.flags\nflags.DEFINE_float('clip_gradient_norm', 0, 'Clip gradients to this norm')\nflags.DEFINE_float('learning_rate', 1e-4, 'Learning rate')\nflags.DEFINE_integer('batch_size', 12, 'Batch size.')\nflags.DEFINE_integer('image_size', 256, 'Image size.')\nflags.DEFINE_integer('ps_tasks', 0,\n 'Number of parameter servers. If 0, parameters '\n 'are handled locally by the worker.')\nflags.DEFINE_integer('num_styles', None, 'Number of styles.')\nflags.DEFINE_integer('save_summaries_secs', 600,\n 'Frequency at which summaries are saved, in seconds.')\nflags.DEFINE_integer('save_interval_secs', 600,\n 'Frequency at which the model is saved, in seconds.')\nflags.DEFINE_integer('task', 0,\n 'Task ID. 
Used when training with multiple '\n 'workers to identify each worker.')\nflags.DEFINE_integer('train_steps', 400000, 'Number of training steps.')\nflags.DEFINE_string('content_weights', DEFAULT_CONTENT_WEIGHTS,\n 'Content weights')\nflags.DEFINE_string('master', '',\n 'Name of the TensorFlow master to use.')\nflags.DEFINE_string('style_coefficients', None,\n 'Scales the style weights conditioned on the style image.')\nflags.DEFINE_string('style_dataset_file', None, 'Style dataset file.')\nflags.DEFINE_string('style_weights', DEFAULT_STYLE_WEIGHTS, 'Style weights')\nflags.DEFINE_string('train_dir', None,\n 'Directory for checkpoints and summaries.')\nflags.DEFINE_integer('log_steps', 2,\n 'Display logging information at every log_steps.')\nFLAGS = flags.FLAGS\n\n\ndef main(unused_argv=None):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n with tf.Graph().as_default():\n # Force all input processing onto CPU in order to reserve the GPU for the\n # forward inference and back-propagation.\n device = '/cpu:0' if not FLAGS.ps_tasks else '/job:worker/cpu:0'\n with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks,\n worker_device=device)):\n inputs, _ = image_utils.imagenet_inputs(FLAGS.batch_size,\n FLAGS.image_size)\n # Load style images and select one at random (for each graph execution, a\n # new random selection occurs)\n _, style_labels, style_gram_matrices = image_utils.style_image_inputs(\n os.path.expanduser(FLAGS.style_dataset_file),\n batch_size=FLAGS.batch_size, image_size=FLAGS.image_size,\n square_crop=True, shuffle=True)\n\n with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):\n # Process style and weight flags\n num_styles = FLAGS.num_styles\n if FLAGS.style_coefficients is None:\n style_coefficients = [1.0 for _ in range(num_styles)]\n else:\n style_coefficients = ast.literal_eval(FLAGS.style_coefficients)\n if len(style_coefficients) != num_styles:\n raise ValueError(\n 'number of style coefficients differs from number of styles')\n content_weights = ast.literal_eval(FLAGS.content_weights)\n style_weights = ast.literal_eval(FLAGS.style_weights)\n\n # Rescale style weights dynamically based on the current style image\n style_coefficient = tf.gather(\n tf.constant(style_coefficients), style_labels)\n style_weights = dict([(key, style_coefficient * value)\n for key, value in style_weights.iteritems()])\n\n # Define the model\n stylized_inputs = model.transform(\n inputs,\n normalizer_params={\n 'labels': style_labels,\n 'num_categories': num_styles,\n 'center': True,\n 'scale': True})\n\n # Compute losses.\n total_loss, loss_dict = learning.total_loss(\n inputs, stylized_inputs, style_gram_matrices, content_weights,\n style_weights)\n '''\n inputs: Tensor(\"batch_processing/Reshape_4:0\", shape=(12, 256, 256, 3), dtype=float32) ,content image\n stylized_inputs: Tensor(\"transformer/expand/conv3/conv/Sigmoid:0\", shape=(12, ?, ?, 3), dtype=float32) ,pastiche image\n style_gram_matrices dict: {} \n 'vgg_16/conv1' () Tensor: Tensor(\"style_image_processing/batch:2\", shape=(12, 64, 64), dtype=float32) \n 'vgg_16/conv2' () Tensor: Tensor(\"style_image_processing/batch:4\", shape=(12, 128, 128), dtype=float32) \n 'vgg_16/conv3' () Tensor: Tensor(\"style_image_processing/batch:6\", shape=(12, 256, 256), dtype=float32) \n 'vgg_16/conv4' () Tensor: Tensor(\"style_image_processing/batch:8\", shape=(12, 512, 512), dtype=float32) \n 'vgg_16/conv5' () Tensor: Tensor(\"style_image_processing/batch:10\", shape=(12, 512, 512), dtype=float32) \n 'vgg_16/pool1' () Tensor: 
Tensor(\"style_image_processing/batch:3\", shape=(12, 64, 64), dtype=float32) \n 'vgg_16/pool2' () Tensor: Tensor(\"style_image_processing/batch:5\", shape=(12, 128, 128), dtype=float32) \n 'vgg_16/pool3' () Tensor: Tensor(\"style_image_processing/batch:7\", shape=(12, 256, 256), dtype=float32) \n 'vgg_16/pool4' () Tensor: Tensor(\"style_image_processing/batch:9\", shape=(12, 512, 512), dtype=float32) \n 'vgg_16/pool5' () Tensor: Tensor(\"style_image_processing/batch:11\", shape=(12, 512, 512), dtype=float32)\n content_weights dict: {} \n 'vgg_16/conv3' () float: 1.0\n style_weights dict: {} \n 'vgg_16/conv1' () Tensor: Tensor(\"mul:0\", shape=(12,), dtype=float32) \n 'vgg_16/conv2' () Tensor: Tensor(\"mul_2:0\", shape=(12,), dtype=float32) \n 'vgg_16/conv3' () Tensor: Tensor(\"mul_1:0\", shape=(12,), dtype=float32) \n 'vgg_16/conv4' () Tensor: Tensor(\"mul_3:0\", shape=(12,), dtype=float32) \n '''\n for key, value in loss_dict.iteritems():\n tf.summary.scalar(key, value)\n\n # Set up training\n optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)\n train_op = slim.learning.create_train_op(\n total_loss, optimizer, clip_gradient_norm=FLAGS.clip_gradient_norm,\n summarize_gradients=False)\n\n # Function to restore VGG16 parameters\n # TODO(iansimon): This is ugly, but assign_from_checkpoint_fn doesn't\n # exist yet.\n saver = tf.train.Saver(slim.get_variables('vgg_16'))\n def init_fn(session):\n saver.restore(session, vgg.checkpoint_file())\n\n # Run training\n slim.learning.train(\n train_op=train_op,\n logdir=os.path.expanduser(FLAGS.train_dir),\n log_every_n_steps=FLAGS.log_steps,\n master=FLAGS.master,\n is_chief=FLAGS.task == 0,\n number_of_steps=FLAGS.train_steps,\n init_fn=init_fn,\n save_summaries_secs=FLAGS.save_summaries_secs,\n save_interval_secs=FLAGS.save_interval_secs)\n\n\ndef console_entry_point():\n tf.app.run(main)\n\n\nif __name__ == '__main__':\n console_entry_point()\n","sub_path":"image_stylization/image_stylization_train.py","file_name":"image_stylization_train.py","file_ext":"py","file_size_in_byte":8759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"142123684","text":"from heapq import heappush, heappop\nn = int(input())\nG = [[] for i in range(n)]\nfor i in range(n-1):\n a, b, c = map(int, input().split())\n G[a-1].append((b-1, c))\n G[b-1].append((a-1, c))\n\nq, k = map(int, input().split())\n\ndist = [10**18]*n\nque = [(0, k-1)]\ndist[k-1] = 0\nwhile que:\n cost, v = heappop(que)\n if dist[v] < cost:\n continue\n for t, c in G[v]:\n if cost + c < dist[t]:\n dist[t] = cost + c\n heappush(que, (cost + c, t))\nans = []\nfor i in range(q):\n x, y = map(int, input().split())\n ans.append(\"%d\" % (dist[x-1] + dist[y-1]))\nprint(\"\\n\".join(ans))\n\n","sub_path":"work/atcoder/abc/abc070/D/answers/505543_aketake08.py","file_name":"505543_aketake08.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"16110495","text":"class Solution:\n # @return a list of lists of length 3, [[val1,val2,val3]]\n\tdef threeSum(self, num):\n\t\tnum.sort()\n\t\tresult = []\n\t\tfor i in range(len(num)-2):\n\t\t\tif i > 0 and num[i] == num[i-1]:\n\t\t\t\tcontinue\n\t\t\tj = i+1\n\t\t\tk = len(num) - 1\n\t\t\ts = -num[i]\n\t\t\twhile j < k:\n\t\t\t\tif num[j] + num[k] == s:\n\t\t\t\t\tresult.append([num[i], num[j], num[k]])\n\t\t\t\t\tj += 1\n\t\t\t\t\twhile j < k:\n\t\t\t\t\t\tif num[j] == num[j-1]:\n\t\t\t\t\t\t\tj += 
1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\twhile k > j:\n\t\t\t\t\t\tif num[k] == num[k-1]:\n\t\t\t\t\t\t\tk -= 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbreak\n\t\t\t\telif num[j] + num[k] < s:\n\t\t\t\t\tj += 1\n\t\t\t\telse:\n\t\t\t\t\tk -= 1\t\t\t\n\t\treturn result\n","sub_path":"LeetCode/Solved/oj015.py","file_name":"oj015.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"56901583","text":"#Authors: Harry Thoma and Qi Liang\n\nimport math\nimport random\nimport sys\nsys.path.append(\"..\") #so other modules can be found in parent dir\nfrom Player import *\nfrom Constants import *\nfrom Construction import CONSTR_STATS\nfrom Ant import UNIT_STATS\nfrom Move import Move\nfrom GameState import *\nfrom AIPlayerUtils import *\n\nMY_ID= 0\nENEMY_ID= 1\n\n##\n#AIPlayer\n#Description: The responsbility of this class is to interact with the game by\n#deciding a valid move based on a given game state. This class has methods that\n#will be implemented by students in Dr. Nuxoll's AI course.\n#\n#Variables:\n# playerId - The id of the player.\n##\nclass AIPlayer(Player):\n\n #__init__\n #Description: Creates a new Player\n #\n #Parameters:\n # inputPlayerId - The id to give the new player (int)\n # cpy - whether the player is a copy (when playing itself)\n ##\n def __init__(self, inputPlayerId):\n super(AIPlayer,self).__init__(inputPlayerId, \"MiniMax\")\n \n ##\n #getPlacement\n #\n #Description: called during setup phase for each Construction that\n # must be placed by the player. These items are: 1 Anthill on\n # the player's side; 1 tunnel on player's side; 9 grass on the\n # player's side; and 2 food on the enemy's side.\n #\n #Parameters:\n # construction - the Construction to be placed.\n # currentState - the state of the game at this point in time.\n #\n #Return: The coordinates of where the construction is to be placed\n ##\n def getPlacement(self, currentState):\n global MY_ID, ENEMY_ID\n MY_ID= currentState.whoseTurn\n ENEMY_ID= 1 - MY_ID\n\n #random.seed(373298298)\n numToPlace = 0\n #implemented by students to return their next move\n if currentState.phase == SETUP_PHASE_1: #stuff on my side\n numToPlace = 11\n moves = []\n for i in range(0, numToPlace):\n move = None\n while move == None:\n #Choose any x location\n x = random.randint(0, 9)\n #Choose any y location on your side of the board\n y = random.randint(0, 3)\n #Set the move if this space is empty\n if currentState.board[x][y].constr == None and (x, y) not in moves:\n move = (x, y)\n #Just need to make the space non-empty. So I threw whatever I felt like in there.\n currentState.board[x][y].constr == True\n moves.append(move)\n return moves\n elif currentState.phase == SETUP_PHASE_2: #stuff on foe's side\n numToPlace = 2\n moves = []\n for i in range(0, numToPlace):\n move = None\n while move == None:\n #Choose any x location\n x = random.randint(0, 9)\n #Choose any y location on enemy side of the board\n y = random.randint(6, 9)\n #Set the move if this space is empty\n if currentState.board[x][y].constr == None and (x, y) not in moves:\n move = (x, y)\n #Just need to make the space non-empty. 
So I threw whatever I felt like in there.\n                        currentState.board[x][y].constr == True\n                moves.append(move)\n            return moves\n        else:\n            return [(0, 0)]\n    \n    ##\n    #getMove\n    #Description: Gets the next move from the Player.\n    #\n    #Parameters:\n    #   currentState - The state of the current game waiting for the player's move (GameState)\n    #\n    #Return: The Move to be made\n    ##\n    def getMove(self, currentState):\n        #cache the fastest route between food and tunnel/anthill\n        buildCache(currentState)\n\n        abRoot = ABSearchNode(None, currentState, None)\n        abRoot.expand(maxDepth=3)\n\n        return abRoot.bestChild.move\n    \n    ##\n    #getAttack\n    #Description: Gets the attack to be made from the Player\n    #\n    #Parameters:\n    #   currentState - A clone of the current state (GameState)\n    #   attackingAnt - The ant currently making the attack (Ant)\n    #   enemyLocations - The Locations of the Enemies that can be attacked (Location[])\n    ##\n    def getAttack(self, currentState, attackingAnt, enemyLocations):\n        #Attack a random enemy.\n        return enemyLocations[random.randint(0, len(enemyLocations) - 1)]\n\n    ##\n    #registerWin\n    #\n    # This agent doesn't learn\n    #\n    def registerWin(self, hasWon):\n        #method template, not implemented\n        pass\n\n#Class to represent a MiniMax node and subtree with alpha-beta pruning\nclass ABSearchNode:\n\n    #given a parent SearchNode and a Move, create a new SearchNode\n    def __init__(self, move, state, parent):\n        self.parent = parent\n        self.move = move\n        self.state = state\n\n        if self.parent == None:\n            self.depth = 0\n        else:\n            self.depth = parent.depth + 1\n\n        # we are a max node if it is our turn\n        self.maxNode = state.whoseTurn == MY_ID\n\n        self.initAlphaBeta()\n\n        #assess utility, we need this for all nodes (even non-terminal) because we\n        #expand in sorted order\n        self.evaluation = utility(state, MY_ID) - utility(state, ENEMY_ID)\n\n        #track whose turn it is\n        if move is None:\n            self.turn = 0\n        elif move.moveType == END:\n            self.turn = parent.turn + 1\n        else:\n            self.turn = parent.turn\n\n        # stop if good enough\n        if self.depth == 0:\n            self.beta = self.evaluation + 20\n\n        self.children = []\n\n    #initialize alpha-beta values by correctly inheriting from ancestors\n    def initAlphaBeta(self):\n        self.alpha = -math.inf\n        self.beta = math.inf\n\n        seenMax = False\n        seenMin = False\n\n        #find first matching ancestor\n        currAncestor = self.parent\n\n        while currAncestor is not None and not seenMax and not seenMin:\n            if currAncestor.maxNode and not seenMax:\n                self.alpha = currAncestor.alpha\n            elif not currAncestor.maxNode and not seenMin:\n                self.beta = currAncestor.beta\n\n            currAncestor = currAncestor.parent\n\n    # is this an interesting node?\n    # i.e. should we expand it\n    def interesting(self, maxDepth):\n        # root node is interesting\n        if self.move is None:\n            return True\n\n        #don't expand past a win node\n        if getWinner(self.state) is not None:\n            return False\n\n        if self.depth >= maxDepth:\n            return False\n\n        #stop expanding when it's the enemy's turn\n        if self.parent.turn == 1:\n            return False\n\n        return True\n\n    # recursively expand the subtree rooted at this node, up to maxDepth deep\n    def expand(self, maxDepth):\n        # is this a terminal node?\n        if not self.interesting(maxDepth):\n            self.alpha = self.evaluation\n            self.beta = self.evaluation\n            return\n\n        allMoves = listAllLegalMoves(self.state)\n\n        # create and sort children based on utility evaluation\n        self.children = [ABSearchNode(move, getNextStateAdversarial(self.state,move), self) for move in allMoves]\n        self.children.sort(reverse=True)\n        for child in self.children:\n
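# A minimal, self-contained sketch (added for illustration; toy tree and values, not
# from the record above) of the cutoff rule this expand() loop applies: children are
# visited in sorted order and a subtree is abandoned as soon as alpha >= beta, just
# like the "stop expanding if no longer relevant" break below.
def alphabeta(node, alpha, beta, maximizing):
    if isinstance(node, int):          # leaves carry their heuristic value directly
        return node
    if maximizing:
        best = float('-inf')
        for child in node:
            best = max(best, alphabeta(child, alpha, beta, False))
            alpha = max(alpha, best)
            if alpha >= beta:          # beta cutoff: the min player will avoid this line
                break
        return best
    best = float('inf')
    for child in node:
        best = min(best, alphabeta(child, alpha, beta, True))
        beta = min(beta, best)
        if alpha >= beta:              # alpha cutoff
            break
    return best

# The leaf 15 is never visited: after seeing 2, beta (2) drops below alpha (3).
print(alphabeta([[3, 12], [2, 15]], float('-inf'), float('inf'), True))  # prints 3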
child.expand(maxDepth)\n\n if self.maxNode:\n self.alpha = max(self.alpha, child.alpha)\n else:\n self.beta = min(self.beta, child.beta)\n\n # stop expanding if no longer relevant\n if self.alpha >= self.beta:\n break\n\n if self.maxNode:\n self.bestChild = max(self.children)\n else:\n self.bestChild = min(self.children)\n\n self.evaluation = self.bestChild.evaluation\n\n def __str__(self):\n return '<{} {} {}>'.format(self.move, self.evaluation, self.depth)\n\n def __repr__(self):\n return str(self)\n\n def __lt__(self, other):\n return self.evaluation < other.evaluation\n\n# hold non-changing but relevant values for the utility function\n# in particular, the fastest route between food and tunnel/anthill (deposit)\nclass Cache:\n def __init__(self, state):\n self.foodCoords = [0]*2\n self.depositCoords = [0]*2\n self.rtt = [0]*2\n\n foods = getConstrList(state, None, (FOOD,))\n for player in [0,1]:\n deposits = getConstrList(state, player, (ANTHILL, TUNNEL))\n\n #find the best combo, based on steps to reach one to the other\n bestCombo = min([(d, f) for d in deposits for f in foods], key=lambda pair: stepsToReach(state, pair[0].coords, pair[1].coords))\n\n self.depositCoords[player] = bestCombo[0].coords\n self.foodCoords[player] = bestCombo[1].coords\n\n self.rtt[player] = approxDist(self.depositCoords[player], self.foodCoords[player])+1\n\nglobalCache = None\n\ndef buildCache(state):\n global globalCache\n\n if globalCache is None or not cacheValid(state):\n globalCache = Cache(state)\n\n#check whether the cache still refers to the current game\ndef cacheValid(state):\n allFood = [food.coords for food in getConstrList(state, None, (FOOD,))]\n allDeposits = [deposit.coords for deposit in getConstrList(state, None, (ANTHILL, TUNNEL))]\n return all(foodCoord in allFood for foodCoord in globalCache.foodCoords) and \\\n all(depositCoord in allDeposits for depositCoord in globalCache.depositCoords)\n\n# evaluate the utility of a state from a given player's perspective\n# return a tuple of relevant unweighted components\ndef utilityComponents(state, perspective):\n enemy = 1-perspective\n\n # get lists for ants\n myWorkers = getAntList(state, perspective, types=(WORKER,))\n enemyWorkers = getAntList(state, enemy, types=(WORKER,))\n\n myWarriors = getAntList(state, perspective, types=(DRONE,SOLDIER,R_SOLDIER))\n enemyWarriors = getAntList(state, enemy, types=(DRONE,SOLDIER,R_SOLDIER))\n\n myQueen = state.inventories[perspective].getQueen()\n enemyQueen = state.inventories[enemy].getQueen()\n\n foodCoords = globalCache.foodCoords[perspective]\n depositCoords = globalCache.depositCoords[perspective]\n anthillCoords = state.inventories[perspective].getAnthill().coords\n\n # it's bad if the queen is on the food\n queenInTheWayScore = 0\n\n queenCoords = myQueen.coords\n if queenCoords in [foodCoords, depositCoords, anthillCoords]:\n queenInTheWayScore -= 1\n\n queenHealthScore = myQueen.health\n\n workerDistScore = 0\n workerDangerScore = 0\n for worker in myWorkers:\n\n # If the worker is carrying food, add the distance to the tunnel to the score\n if worker.carrying == True:\n distanceFromTunnel = approxDist(worker.coords, depositCoords)\n workerDistScore -= distanceFromTunnel\n\n # if the worker is not carrying food, add the distance from the food and tunnel to the score\n else:\n distTunnelFood = approxDist(foodCoords, depositCoords)\n workerDistScore -= distTunnelFood\n distanceFromFood = approxDist(worker.coords, foodCoords)\n workerDistScore -= distanceFromFood\n\n #its bad to be close to 
enemy warriors\n for warrior in enemyWarriors:\n #warriorRange = UNIT_STATS[warrior.type][RANGE] + UNIT_STATS[warrior.type][MOVEMENT]\n if approxDist(worker.coords, warrior.coords) < 2:\n workerDangerScore -= 1\n\n # Aim to attack workers, if there are no workers, aim to attack queen\n if len(enemyWorkers) != 0:\n targetCoords = enemyWorkers[0].coords\n else:\n targetCoords = enemyQueen.coords\n\n warriorDistScore = 0\n # Add distance from fighter ants to their targets to score, with a preference to move vertically\n for warrior in myWarriors:\n warriorDistScore -= (warrior.coords[0] - targetCoords[0])**2\n warriorDistScore -= (warrior.coords[1] - targetCoords[1])**2\n\n #do we have an attacker?\n attackScore = UNIT_STATS[myWarriors[0].type][ATTACK] if len(myWarriors) == 1 else 0\n\n # punishment for if the enemy has workers\n enemyWorkerScore = - (len(enemyWorkers) * len(myWarriors))\n\n # Heavy punishment for not having workers, since workers are needed to win\n noWorkerScore = -1 if len(myWorkers) == 0 else 0\n\n foodScore = state.inventories[perspective].foodCount\n\n antCountScore = -len(getAntList(state, MY_ID)) if perspective == MY_ID else 0\n\n return (queenInTheWayScore, workerDistScore, workerDangerScore, warriorDistScore, enemyWorkerScore,\n noWorkerScore, foodScore, attackScore, antCountScore, queenHealthScore)\n\n# evaluate the given state from the given player's perspective\n# by assigning weights to components\ndef utility(state, perspective):\n INF = 10e6\n # return an arbitrarily small score if we're the winner\n winner = getWinner(state)\n if winner is not None:\n if (winner==1 and state.whoseTurn == perspective) or \\\n (winner==0 and state.whoseTurn != perspective):\n return INF\n else:\n return 0\n\n\n components = utilityComponents(state, perspective)\n #weights determined emperically, food score weighted by round trip time between food and deposit\n weights = (50, 50, 15, 4, 0,\n 300, 100*globalCache.rtt[state.whoseTurn], 250*globalCache.rtt[state.whoseTurn], 300, 10)\n\n return sum(a*b for a,b in zip(weights, components))\n\n##\n# getNextStateAdversarial\n#\n# we copied this because we wanted to model attacking\n#\n# Description: This is the same as getNextState (above) except that it properly\n# updates the hasMoved property on ants and the END move is processed correctly.\n#\n# Parameters:\n# currentState - A clone of the current state (GameState)\n# move - The move that the agent would take (Move)\n#\n# Return: A clone of what the state would look like if the move was made\n##\ndef getNextStateAdversarial(currentState, move):\n # variables I will need\n nextState = getNextState(currentState, move)\n myInv = getCurrPlayerInventory(nextState)\n myAnts = myInv.ants\n\n # If an ant is moved update their coordinates and has moved\n if move.moveType == MOVE_ANT:\n endingCoord = move.coordList[-1]\n for ant in myAnts:\n if ant.coords == endingCoord:\n ant.hasMoved = True\n\n #attack an enemy if they are next to the soldier\n if ant.type in [SOLDIER, DRONE, R_SOLDIER]:\n for enemy in nextState.inventories[1-currentState.whoseTurn].ants:\n if approxDist(enemy.coords, ant.coords) < 2:\n enemy.health-= UNIT_STATS[ant.type][ATTACK]\n if enemy.health < 1:\n nextState.inventories[1 - currentState.whoseTurn].ants.remove(enemy)\n break\n\n elif move.moveType == END:\n for ant in myAnts:\n ant.hasMoved = False\n nextState.whoseTurn = 1 - currentState.whoseTurn\n\n elif move.moveType == BUILD:\n getAntAt(nextState, move.coordList[0]).hasMoved= True\n\n return 
nextState","sub_path":"ReAntics/src/AI/MiniMax.py","file_name":"MiniMax.py","file_ext":"py","file_size_in_byte":15261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"43882716","text":"# github.com/gentlespoon\n# 0001 - Easy - Two Sum\n# Start 2020-06-01-11-24-18\n# Finish 2020-06-01-11-37-16\n\n# Runtime: 48 ms, faster than 79.68% of Python3 online submissions for Two Sum.\n# Memory Usage: 15.2 MB, less than 5.11% of Python3 online submissions for Two Sum.\n\n\n\n# Given an array of integers, return indices of the two numbers such that they add up to a specific target.\n# You may assume that each input would have exactly one solution, and you may not use the same element twice.\n\n# Example:\n# Given nums = [2, 7, 11, 15], target = 9,\n# Because nums[0] + nums[1] = 2 + 7 = 9,\n# return [0, 1].\n\n\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n hash_table = {}\n for i, num in enumerate(nums):\n if target - num in hash_table:\n return ([hash_table[target - num], i])\n break\n hash_table[num] = i\n return ([])","sub_path":"0001_E_Two-Sum.py","file_name":"0001_E_Two-Sum.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"43252605","text":"import pickle\nimport bz2\n\nmydict={\"name\":\"manan\",\"age\":30,\"sex\":\"male\",\"married\":\"False\"}\n#\n#\n# file1=\"mypickle1\"\n# fileobj= open(file1,\"wb\")\n#\n#\n# pickle.dump(mydict,fileobj)\n\n\nfile=\"mypickle2\"\n\nfile1= open(file,\"wb\")\n#\n# y=pickle.load(file1)\n#\n# print(type(y))\n\na=bz2.BZ2File(file1)\n\npickle.dump(mydict, file1)\n\nfile1.close()","sub_path":"pythonProject/pickletest.py","file_name":"pickletest.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"80973229","text":"\n\nimport tensorflow as tf\n\n\n\nprint(\"TensorFlow Version {}:\".format(tf.VERSION))\n\nhello = tf.constant(\"Hello , Tensorflow!\")\nsession= tf.Session()\nprint(session.run(hello))\n\n\na = tf.constant(10)\nb= tf.constant(20)\n\nprint(session.run(a+b))\n\nsession.close()\n\n","sub_path":"verify_tf_installation.py","file_name":"verify_tf_installation.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"107646503","text":"from data_augmentation.augmentation import AugmentTrain\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\nfrom keras.optimizers import Adam\nfrom loss_function import dice_coef_loss\nimport os\n\ntrain_folder = r'H:/data/TZ roi/data/Train'\nvalidation_folder = r'H:/data/TZ roi/data/Validation'\nstore_folder = r'H:/data/TZ roi/savemodel'\ninput_shape = [160, 160, 1]\nbatch_size = 16\n\nif not os.path.exists(store_folder):\n os.mkdir(store_folder)\n\nnumber_training = len(os.listdir(train_folder))\nnumber_validation = len(os.listdir(validation_folder))\n\n# Generate\ntrain_generator = AugmentTrain(train_folder, batch_size)\nvalidation_generator = AugmentTrain(validation_folder, batch_size)\n\n\n# Model\nfrom u_net import u_net\nfrom saveandload import SaveModel\n\nmodel = u_net(input_shape)\nSaveModel(model, store_folder)\n\ncallbacks = [\n ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=20, mode='min'),\n EarlyStopping(monitor='val_loss', patience=100, mode='min'),\n ModelCheckpoint(filepath=os.path.join(store_folder, 
'best_weights.h5'), monitor='val_loss',\n save_best_only=True, mode='min', period=1)\n]\n\n\nmodel.compile(loss=dice_coef_loss, optimizer=Adam(0.001), metrics=[dice_coef_loss])\n\nhistory = model.fit_generator(train_generator, steps_per_epoch=number_training // batch_size, epochs=1000, verbose=1,\n validation_data=validation_generator, validation_steps=number_validation // batch_size,\n callbacks=callbacks)\n\nmodel.save_weights(os.path.join(store_folder, 'last_weights.h5'))\nfrom visualization import show_train_history\nshow_train_history(history, 'loss', 'val_loss')\n\n\n\n\n\n\n\n\n","sub_path":"data_augmentation/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"46026412","text":"\"\"\"\n* user: VR429228000\n* fname: LUCA\n* lname: COSI\n* task: prova-perm-composition\n* score: 100.0\n* date: 2019-02-27 10:27:39.289252\n\"\"\"\n#!/usr/bin/env python3\n# Template di soluzione per perm_composition\n\n# INIZIO area entro la quale ti richiediamo/consigliamo di operare.\n \ndef perm_composition(p1, p2):\n s=[]\n l=len(p1)\n k=0\n i=0\n while i largest_next_room[1]:\n largest_next_room = (key_value[0], unvisited_room_id)\n # replace \"n\": \"*\" with \"n\": \"8\"\n visited[room.id][largest_next_room[0]] = largest_next_room[1]\n # store values of previous room (dir_i_came_from, prev_rm_id)\n\n prev_rm_id = int(room.id)\n prev_room = (get_opposite(largest_next_room[0]), prev_rm_id)\n # * go to next room and add to traversal_path\n my_traversal_path.append(largest_next_room)\n player.travel(largest_next_room[0])\n\n global traversal_path\n for pair in my_traversal_path:\n traversal_path.append(pair[0])\n\n\ndfs()\n\n# TRAVERSAL TEST\nvisited_rooms = set()\nplayer.current_room = world.starting_room\nprint(\"after Here:\\n\", player.current_room,\n \"RIGHT HERE!!!\", \"type:\", type(player.current_room))\nprint(player.current_room.id)\nprint(player.current_room.get_exits())\nprint(player.current_room.get_room_in_direction('s').id)\nprint(player.travel('s'))\nprint(player.current_room.get_room_in_direction('n').id, \"type:\",\n type(player.current_room.get_room_in_direction('n').id))\nprint(player.current_room.id, \"p.cur.id\")\nvisited_rooms.add(player.current_room)\n\nfor move in traversal_path:\n player.travel(move)\n visited_rooms.add(player.current_room)\n\nif len(visited_rooms) == len(room_graph):\n print(\n f\"TESTS PASSED: {len(traversal_path)} moves, {len(visited_rooms)} rooms visited\")\nelse:\n print(\"TESTS FAILED: INCOMPLETE TRAVERSAL\")\n print(f\"{len(room_graph) - len(visited_rooms)} unvisited rooms\")\n\n\n#######\n# UNCOMMENT TO WALK AROUND\n#######\nplayer.current_room.print_room_description(player)\nwhile True:\n cmds = input(\"-> \").lower().split(\" \")\n if cmds[0] in [\"n\", \"s\", \"e\", \"w\"]:\n player.travel(cmds[0], True)\n elif cmds[0] == \"q\":\n break\n else:\n print(\"I did not understand that command.\")\n","sub_path":"adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":6527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"569407095","text":"# phase 3\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as f\nfrom torchsummary import summary\n\n\nclass Generator(nn.Module):\n \"\"\"\n Class representing the Generator network to be used.\n \"\"\"\n\n VALID_OUT_FRAMES = (16,)\n\n def __init__(self, in_channels, out_frames, gen_name='Video Generator'):\n 
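# Illustrative sketch (added; toy shapes, not part of the Generator record): the
# recurring building block in this network is Conv3d -> ReLU followed by trilinear
# upsampling via F.interpolate, with pooled keypoint/motion tensors concatenated on
# the channel axis between blocks.
import torch
import torch.nn as nn
import torch.nn.functional as F

block = nn.Sequential(
    nn.Conv3d(in_channels=8, out_channels=4, kernel_size=3, stride=1, padding=1),
    nn.ReLU(inplace=True),
)
x = torch.randn(2, 8, 2, 28, 28)   # (batch, channels, frames, height, width)
y = F.interpolate(block(x), size=(4, 56, 56), mode='trilinear', align_corners=False)
print(y.shape)                     # torch.Size([2, 4, 4, 56, 56])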
\"\"\"\n Initializes the Generator network.\n :param in_channels: (list of ints) The number of channels in each input tensor respectively\n :param out_frames: (int) The number of frames desired in the generated output video.\n Legal values: 8, 16\n :param gen_name: (str, optional) The name of the network (default 'Video Generator').\n Raises:\n ValueError: if 'out_frames' is not a legal value.\n \"\"\"\n if out_frames not in self.VALID_OUT_FRAMES:\n raise ValueError('Invalid number of frames in desired output: %d' % out_frames)\n\n super(Generator, self).__init__()\n self.gen_name = gen_name\n self.out_frames = out_frames\n\n # definition of all network layers\n out_channels = {'conv_1a': 256,\n 'conv_1b': 128,\n 'conv_2a': 64,\n 'conv_2b': 32,\n 'conv_3a': 16,\n 'conv_3b': 8,\n 'conv_4a': 4,\n 'conv_4b': 4,\n 'conv_5a': 4,\n 'conv_5b': 3\n } # key: layer name, value: out_channels\n in_channels = {'conv_1a': sum(in_channels),\n 'conv_1b': out_channels['conv_1a'],\n 'conv_2a': out_channels['conv_1b'] + in_channels[1] + in_channels[2], # + 288\n 'conv_2b': out_channels['conv_2a'],\n 'conv_3a': out_channels['conv_2b'] + in_channels[1] + in_channels[2], # + 288\n 'conv_3b': out_channels['conv_3a'],\n 'conv_4a': out_channels['conv_3b'] + in_channels[1], # + 32\n 'conv_4b': out_channels['conv_4a'],\n 'conv_5a': out_channels['conv_4b'], # + 32\n 'conv_5b': out_channels['conv_5a']\n } # key: layer name, value: in_channels\n\n # block 1\n self.avg_pool_kp_1 = nn.AvgPool3d(kernel_size=(16, 2, 2), stride=(16, 2, 2))\n self.avg_pool_rep_1 = nn.AvgPool3d(kernel_size=(4, 1, 1), stride=(4, 1, 1))\n\n layer = 'conv_1a'\n self.conv3d_1a = nn.Conv3d(in_channels=in_channels[layer], out_channels=out_channels[layer],\n kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1))\n self.relu_1a = nn.ReLU(inplace=True)\n layer = 'conv_1b'\n self.conv3d_1b = nn.Conv3d(in_channels=in_channels[layer], out_channels=out_channels[layer],\n kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1))\n self.relu_1b = nn.ReLU(inplace=True)\n\n # block 2\n self.avg_pool_kp_2 = nn.AvgPool3d(kernel_size=(8, 1, 1), stride=(8, 1, 1))\n self.avg_pool_rep_2 = nn.AvgPool3d(kernel_size=(2, 1, 1), stride=(2, 1, 1))\n\n layer = 'conv_2a'\n self.conv3d_2a = nn.Conv3d(in_channels=in_channels[layer], out_channels=out_channels[layer],\n kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1))\n self.relu_2a = nn.ReLU(inplace=True)\n layer = 'conv_2b'\n self.conv3d_2b = nn.Conv3d(in_channels=in_channels[layer], out_channels=out_channels[layer],\n kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1))\n self.relu_2b = nn.ReLU(inplace=True)\n\n # block 3\n self.avg_pool_kp_3 = nn.AvgPool3d(kernel_size=(4, 1, 1), stride=(4, 1, 1))\n\n layer = 'conv_3a'\n self.conv3d_3a = nn.Conv3d(in_channels=in_channels[layer], out_channels=out_channels[layer],\n kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1))\n self.relu_3a = nn.ReLU(inplace=True)\n layer = 'conv_3b'\n self.conv3d_3b = nn.Conv3d(in_channels=in_channels[layer], out_channels=out_channels[layer],\n kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1))\n self.relu_3b = nn.ReLU(inplace=True)\n\n # block 4\n self.avg_pool_kp_4 = nn.AvgPool3d(kernel_size=(2, 1, 1), stride=(2, 1, 1))\n\n layer = 'conv_4a'\n self.conv3d_4a = nn.Conv3d(in_channels=in_channels[layer], out_channels=out_channels[layer],\n kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1))\n self.relu_4a = nn.ReLU(inplace=True)\n layer = 'conv_4b'\n self.conv3d_4b = 
nn.Conv3d(in_channels=in_channels[layer], out_channels=out_channels[layer],\n kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1))\n self.relu_4b = nn.ReLU(inplace=True)\n\n # block 5\n layer = 'conv_5a'\n self.conv3d_5a = nn.Conv3d(in_channels=in_channels[layer], out_channels=out_channels[layer],\n kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1))\n self.relu_5a = nn.ReLU(inplace=True)\n layer = 'conv_5b'\n self.conv3d_5b = nn.Conv3d(in_channels=in_channels[layer], out_channels=out_channels[layer],\n kernel_size=(1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0))\n\n self.sigmoid = nn.Sigmoid()\n\n # print('%s Model Successfully Built \\n' % self.gen_name)\n\n def forward(self, app, kp, rep):\n \"\"\"\n Function to compute a single forward pass through the network, according to the architecture.\n :param app: (tensor) The appearance features for the desired view of the output video.\n Must be a tensor of shape: (bsz, 256, 1, 14, 14) for this application.\n :param kp: (tensor) The keypoints for the video action.\n Must be a tensor of shape: (bsz, 32, 16, 28, 28) for this application.\n :param rep: (tensor) The motion representation/features for the video action.\n Must be a tensor of shape: (bsz, 256, 4, 14, 14) for this application.\n :return: A tensor representing the video generated by the network.\n Shape of output is: (bsz, 3, 8/16, 112, 112) for this application.\n \"\"\"\n if len(app.size()) == 4:\n app = torch.unsqueeze(app, dim=2) # dim=frames\n\n # block 1\n kp_block_input = self.avg_pool_kp_1(kp)\n rep_block_input = self.avg_pool_rep_1(rep)\n x = torch.cat([app, kp_block_input, rep_block_input], dim=1) # dim=channels\n\n x = self.conv3d_1a(x)\n x = self.relu_1a(x)\n x = self.conv3d_1b(x)\n x = self.relu_1b(x)\n\n x = f.interpolate(x, size=(2, 28, 28), mode='trilinear')\n\n # block 2\n kp_block_input = self.avg_pool_kp_2(kp)\n rep_block_input = self.avg_pool_rep_2(rep)\n rep_block_input = f.interpolate(rep_block_input, size=(2, 28, 28), mode='trilinear')\n x = torch.cat([x, kp_block_input, rep_block_input], dim=1) # dim=channels\n\n x = self.conv3d_2a(x)\n x = self.relu_2a(x)\n x = self.conv3d_2b(x)\n x = self.relu_2b(x)\n x = f.interpolate(x, size=(4, 56, 56), mode='trilinear')\n\n # block 3\n kp_block_input = self.avg_pool_kp_3(kp)\n kp_block_input = f.interpolate(kp_block_input, size=(4, 56, 56), mode='trilinear')\n rep_block_input = f.interpolate(rep, size=(4, 56, 56), mode='trilinear')\n x = torch.cat([x, kp_block_input, rep_block_input], dim=1) # dim=channels\n\n x = self.conv3d_3a(x)\n x = self.relu_3a(x)\n x = self.conv3d_3b(x)\n x = self.relu_3b(x)\n\n x = f.interpolate(x, size=(8, 112, 112), mode='trilinear')\n\n # block 4\n kp_block_input = self.avg_pool_kp_4(kp)\n kp_block_input = f.interpolate(kp_block_input, size=(8, 112, 112), mode='trilinear')\n x = torch.cat([x, kp_block_input], dim=1) # dim=channels\n\n x = self.conv3d_4a(x)\n x = self.relu_4a(x)\n x = self.conv3d_4b(x)\n x = self.relu_4b(x)\n\n x = f.interpolate(x, size=(16, 112, 112), mode='trilinear')\n\n # block 5\n # kp_block_input = f.interpolate(kp, size=(16, 112, 112), mode='trilinear')\n # x = torch.cat([x, kp_block_input], dim=1) # dim=channels\n\n x = self.conv3d_5a(x)\n x = self.relu_5a(x)\n x = self.conv3d_5b(x)\n x = self.sigmoid(x)\n\n return x\n\n\nif __name__ == \"__main__\":\n print_summary = True\n\n gen = Generator(in_channels=[1, 1, 1], out_frames=16)\n\n if print_summary:\n summary(gen, input_size=[(1, 14, 14), (1, 16, 28, 28), (1, 4, 14, 
14)])\n","sub_path":"2recons/newgen/networks/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":8889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"428122426","text":"# -*- coding:utf-8 -*-\nclass RadixSort:\n def __init__(self, data):\n self.data = data\n\n def run(self):\n self.radixsort()\n return self.data\n\n def radixsort(self, radix=10):\n K = int(math.ceil(math.log(max(self.data) + 1, radix))) # 用K位数可表示任意整数\n for i in range(1, K + 1): # K次循环\n bucket = [[] for i in range(radix)] # 不能用 [[]]*radix,否则相当于开了radix个完全相同的list对象\n for val in self.data:\n bucket[val % (radix ** i) // (radix ** (i - 1))].append(val) # 获取整数第K位數字(从低到高)\n del self.data[:]\n for each in bucket:\n self.data.extend(each) # 桶合并\n\n\nif __name__ == \"__main__\":\n import math\n # 基数排序\n s = RadixSort([2, 1, 3, 4, 5, 9, 8, 7, 6, 10, 21])\n print(s.run())\n","sub_path":"Sort/RadixSort.py","file_name":"RadixSort.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"272465442","text":"#An application to remove the product/products from the cart selected previously on the e-commerce platform by the end-user.\r\n\r\nfrom django.shortcuts import render, HttpResponseRedirect\r\nfrom django.core.urlresolvers import reverse\r\n\r\n# Create your views here.\r\n\r\nfrom .models import Cart1, Itemincart\r\n#Cart1 is the class defined in models containing the characteristics & updates for several products of retail\r\n#Itemincart is the class comprising the inheritance of Class Cart1 in it along with the objects like timestamp, updates and quantity of products.\r\n\r\ndef remove_from_cart(request, id):\r\n\ttry:\r\n\t\tthe_id = request.session['cart_id'] \r\n\t\tcart = Cart1.objects.get(id=the_id)\r\n\texcept:\r\n\t\treturn HttpResponseRedirect(reverse(\"cart\"))\r\n\r\n\tcartitem = Itemincart.objects.get(id=id)\r\n\tcartitem.delete()\r\n\tcartitem.cart = None\r\n\tcartitem.save()\r\n\treturn HttpResponseRedirect(reverse(\"cart\"))\r\n\t\t\r\n\r\n","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"296708500","text":"\n# Unit-tests the lexer.\n\n\nimport unittest\nfrom lexer import Lexer\n\n\nclass EmptyInputTest(unittest.TestCase):\n\tdef testNoInput(self):\n\t\t\"\"\"\n\t\ttests whether the lexer returns an empty array for the empty string\n\t\t\"\"\"\n\t\tl = Lexer()\n\t\temptyStr = \"\"\n\t\ttokens = l.lex(emptyStr)\n\n\t\tself.assertEqual(tokens, [])\n\n\nclass BadInputTests(unittest.TestCase):\n\tdef testUnrecognizedSymbolStart(self):\n\t\t\"\"\"\n\t\ttests whether the lexer raises an error on unrecognized symbols at the \n\t\t\tstart of the input\n\t\t\"\"\"\n\t\tl = Lexer()\n\t\tbadStr = \"& 1 + 2\"\n\n\t\twith self.assertRaises(ValueError):\n\t\t\tl.lex(badStr)\n\n\tdef testUnrecognizedSymbolMid(self):\n\t\t\"\"\"\n\t\ttests whether the lexer raises an error on unrecognized symbols in the \n\t\t\tmiddle of the input\n\t\t\"\"\"\n\t\tl = Lexer()\n\t\tbadStr = \"1 + , 2\"\n\n\t\twith self.assertRaises(ValueError):\n\t\t\tl.lex(badStr)\n\n\tdef testUnrecognizedSymbolEnd(self):\n\t\t\"\"\"\n\t\ttests whether the lexer raises an error on unrecognized symbols in the \n\t\t\tmiddle of the input\n\t\t\"\"\"\n\t\tl = Lexer()\n\t\tbadStr = \"1 + 2 ^\"\n\n\t\twith 
\t\twith self.assertRaises(ValueError):\n\t\t\tl.lex(badStr)\n\n\nclass ValidInputTest(unittest.TestCase):\n\tdef testValidInput(self):\n\t\t\"\"\"\n\t\ttests whether the lexer returns the correct tokens for a valid \n\t\t\tstring\n\t\t\"\"\"\n\t\tl = Lexer()\n\t\tnormalStr = \"\"\" \t + 1 z29 =\t( / *\t- )\t; 23 a\n\t\t\t3 ab\"\"\"\n\t\ttokens = l.lex(normalStr)\n\n\t\tself.assertEqual(tokens, [('+', 'addop'), ('1', 'num'), ('z29', 'id'), \n\t\t\t('=', 'assign'), ('(', 'paren'), ('/', 'mulop'), ('*', 'mulop'), \n\t\t\t('-', 'addop'), (')', 'paren'), (';', 'sep'), ('23', 'num'), \n\t\t\t('a', 'id'), ('3', 'num'), ('ab', 'id')])\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"lexer_tests.py","file_name":"lexer_tests.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"269829681","text":"class Solution(object):\n    def canPartition(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: bool\n        \"\"\"\n        target, sz = sum(nums), len(nums)\n        if target & 0x1:\n            return False\n        target //= 2\n        opt = [[False] * (target+1) for _ in range(sz+1)]\n        opt[0][0] = True\n        for i in range(1, sz+1):\n            for t in range(target+1):\n                opt[i][t] = opt[i-1][t] or nums[i-1] == target or (t >= nums[i-1] and opt[i-1][t-nums[i-1]])\n        return opt[sz][target]\n","sub_path":"Partition Equal Subset Sum.py","file_name":"Partition Equal Subset Sum.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"244213937","text":"from django.conf.urls import patterns, url\n\nurlpatterns = patterns('posts.views',\n\turl(r'^create/$', 'create'),\n\turl(r'^details/$', 'details'),\n\turl(r'^list/$', 'list'),\n\turl(r'^update/$', 'update'),\n\turl(r'^vote/$', 'vote'),\n\turl(r'^remove/$', 'remove'),\n\turl(r'^restore/$', 'restore'),\n)\n","sub_path":"posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"278158218","text":"import pandas as pd\nimport sys\nimport re\nimport math\nimport pickle\n\npd.set_option('display.max_columns', None)\n\ndelta = 0.00000000000001\nmax_depth = 7\n\n\ndef main():\n    file_path = sys.argv[1]\n    hypothesis_file = sys.argv[2]\n    learning_type = sys.argv[3]\n    df = create_df(file_path)\n    if learning_type == 'dt':\n        dTree = decisionTree(df, 0)\n        dTree['LearningType'] = 'dt'\n        f = open(hypothesis_file + '.pkl', 'wb')\n        pickle.dump(dTree, f)\n        f.close()\n    if learning_type == 'ada':\n        estimators = 8\n        if estimators > 10:\n            estimators = 10\n        adaTree = adaBoost(df, estimators)\n        adaTree['LearningType'] = 'ada'\n        f = open(hypothesis_file + '.pkl', 'wb')\n        pickle.dump(adaTree, f)\n        f.close()\n\n\ndef decisionTree(df, depth, dTree=None):\n    target_entropy = entropy(df['Language'])\n    attribute_entropy = get_entropy_attributes(df)\n    best_attribute = df.columns[get_best_attribute(attribute_entropy, target_entropy)]\n    if dTree is None:\n        dTree = {best_attribute: {}}\n    if depth > max_depth:\n        leaf_node = getLeafNode(df, best_attribute)\n        dTree[best_attribute][1] = leaf_node\n        if leaf_node == 'en':\n            dTree[best_attribute][0] = 'nl'\n        else:\n            dTree[best_attribute][0] = 'en'\n        # if df[best_attribute].value_counts()[1] > df[best_attribute].value_counts()[0]:\n        #     dTree[best_attribute][1] = leaf_node\n        # else:\n        #     dTree[best_attribute][0] = leaf_node\n        return dTree\n    df1, df2 = splitData(df, best_attribute)\n
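# Added sketch: the same subset-sum recurrence as the canPartition record above, in
# its common one-dimensional form. Iterating targets downward guarantees each number
# is used at most once, which is why the 2-D version must read row i-1, not row i.
def can_partition(nums):
    total = sum(nums)
    if total % 2:
        return False
    target = total // 2
    reachable = [True] + [False] * target
    for x in nums:
        for t in range(target, x - 1, -1):   # downward: 0/1 use of x
            reachable[t] = reachable[t] or reachable[t - x]
    return reachable[target]

print(can_partition([1, 5, 11, 5]))  # True: {11} vs {1, 5, 5}
print(can_partition([1, 2, 3, 5]))   # False: total is odd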
    flag, result = checkLeafNode(df1)\n    if flag:\n        dTree[best_attribute][1] = result\n    else:\n        dTree[best_attribute][1] = decisionTree(df1, depth + 1)\n    flag, result = checkLeafNode(df2)\n    if flag:\n        dTree[best_attribute][0] = result\n    else:\n        dTree[best_attribute][0] = decisionTree(df2, depth + 1)\n    return dTree\n\n\ndef splitData(df, best_attribute):\n    df1 = df[df[best_attribute] == 1].reset_index(drop=True)\n    df2 = df[df[best_attribute] == 0].reset_index(drop=True)\n    return df1, df2\n\n\ndef getLeafNode(df, best_attribute):\n    target = df['Language']\n    count1 = 0\n    count2 = 0\n    for val in target:\n        if val == 'en':\n            count1 += 1\n        else:\n            count2 += 1\n    if count1 > count2:\n        return 'en'\n    else:\n        return 'nl'\n\n\ndef checkLeafNode(df):\n    target = df['Language']\n    count1 = 0\n    count2 = 0\n    for val in target:\n        if val == 'en':\n            count1 += 1\n        else:\n            count2 += 1\n    if count1 == 0:\n        return True, 'nl'\n    elif count2 == 0:\n        return True, 'en'\n    else:\n        return False, ' '\n\n\ndef entropy(target):\n    countEn = 0\n    countNl = 0\n    for val in target:\n        if val == 'en':\n            countEn += 1\n        else:\n            countNl += 1\n    probEn = countEn / len(target)\n    probNl = countNl / len(target)\n    return -probEn * math.log2(probEn + delta) - probNl * math.log2(probNl + delta)\n\n\ndef get_entropy_attributes(df):\n    attribute_entropy = []\n    columns = df.columns[0:len(df.columns) - 1]\n    for col in columns:\n        count1 = 0\n        count2 = 0\n        count3 = 0\n        count4 = 0\n        for index, row in df.iterrows():\n            if row[col] == 1 and row['Language'] == 'en':\n                count1 += 1\n            elif row[col] == 0 and row['Language'] == 'en':\n                count2 += 1\n            elif row[col] == 1 and row['Language'] == 'nl':\n                count3 += 1\n            elif row[col] == 0 and row['Language'] == 'nl':\n                count4 += 1\n        prob1 = count1 / (len(df[col][df[col] == 1]) + delta)\n        prob2 = count2 / (len(df[col][df[col] == 0]) + delta)\n        prob3 = count3 / (len(df[col][df[col] == 1]) + delta)\n        prob4 = count4 / (len(df[col][df[col] == 0]) + delta)\n        f1 = len(df[col][df[col] == 1]) / len(df)\n        f2 = len(df[col][df[col] == 0]) / len(df)\n        attribute_entropy.append(abs(\n            -f1 * (-prob1 * math.log2(prob1 + delta) - prob3 * math.log2(prob3 + delta)) - f2 * (\n                    -prob2 * math.log2(prob2 + delta)\n                    - prob4 * math.log2(prob4 + delta))))\n    return attribute_entropy\n\n\ndef get_best_attribute(attribute_entropy, target_entropy):\n    best_attribute = 0\n    max_infoGain = 0\n    index = 0\n    for att_ent in attribute_entropy:\n        if target_entropy - att_ent > max_infoGain:\n            best_attribute = index\n            max_infoGain = target_entropy - att_ent\n        index += 1\n\n    return best_attribute\n\n\ndef create_df(file_path):\n    dataList = []\n    with open(file_path) as f:\n        line = f.readline().strip().lower()\n        while line:\n            tempList = []\n            target_sentence_split = line.split('|')\n            target = target_sentence_split[0]\n            sentence = target_sentence_split[1]\n            tempList.append(avgWordCount(sentence))\n            tempList.append(checkLetterEFreq(sentence))\n            tempList.append(checkLetterNFreq(sentence))\n            tempList.append(checkThe(sentence))\n            tempList.append(checkDe(sentence))\n            tempList.append(checkTwoLetter(sentence))\n            tempList.append(checkIJ(sentence))\n            tempList.append(countVowel(sentence))\n            tempList.append(checkLetterQ(sentence))\n            tempList.append(checkWordLength1(sentence))\n            tempList.append(target)\n            dataList.append(tempList)\n            line = f.readline().strip().lower()\n    return pd.DataFrame(dataList, columns=['WordCount>5.1', 'LetterEFreq', 'LetterNFreq', 'The?', 'De?',\n                                           'RepeatLetters', 'IJ?', 'VowelCount>14', 'Q?', 'Len1Words', 'Language'])\n\n\ndef avgWordCount(sentence):\n    words = sentence.split()\n    count = 0\n
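# Added sketch, with made-up counts, of the quantities the decision-tree code above
# computes: entropy of a boolean label and the information gain of a candidate split.
import math

def H(p):
    # entropy of a two-class distribution with positive-class probability p
    return 0.0 if p in (0.0, 1.0) else -p * math.log2(p) - (1 - p) * math.log2(1 - p)

# 10 samples: 6 'en', 4 'nl' -> parent entropy
parent = H(6 / 10)
# attribute=1 covers 5 samples (4 'en'); attribute=0 covers 5 samples (2 'en')
children = (5 / 10) * H(4 / 5) + (5 / 10) * H(2 / 5)
print(round(parent - children, 4))  # ~0.1245, the gain used to rank attributes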
for word in words:\n word = re.sub(r'[^\\w\\s]', '', word)\n count += len(word)\n avgCount = count / 15\n if avgCount > 5.1:\n return 1\n else:\n return 0\n\n\ndef checkThe(sentence):\n words = sentence.split()\n for word in words:\n word = re.sub(r'[^\\w\\s]', '', word)\n if word == 'the':\n return 1\n return 0\n\n\ndef checkDe(sentence):\n words = sentence.split()\n for word in words:\n word = re.sub(r'[^\\w\\s]', '', word)\n if word == 'de':\n return 1\n return 0\n\n\ndef checkTwoLetter(sentence):\n words = sentence.split()\n for word in words:\n word = re.sub(r'[^\\w\\s]', '', word)\n for i in range(0, len(word) - 1):\n if word[i] == word[i + 1]:\n return 1\n return 0\n\n\ndef checkLetterNFreq(sentence):\n words = sentence.split()\n totalCount = 0\n countN = 0\n for word in words:\n word = re.sub(r'[^\\w\\s]', '', word)\n totalCount += len(word)\n for letter in word:\n if letter == 'n':\n countN += 1\n if countN / totalCount > 0.085:\n return 1\n else:\n return 0\n\n\ndef checkLetterEFreq(sentence):\n words = sentence.split()\n totalCount = 0\n countE = 0\n for word in words:\n word = re.sub(r'[^\\w\\s]', '', word)\n totalCount += len(word)\n for letter in word:\n if letter == 'e':\n countE += 1\n if countE / totalCount > 0.15:\n return 1\n else:\n return 0\n\n\ndef checkIJ(sentence):\n words = sentence.split()\n for word in words:\n word = re.sub(r'[^\\w\\s]', '', word)\n for i in range(0, len(word) - 1):\n if word[i] == 'i' and word[i + 1] == 'j':\n return 1\n return 0\n\n\ndef countVowel(sentence):\n words = sentence.split()\n vowelList = ['a', 'e', 'i', 'o', 'u']\n count = 0\n for word in words:\n word = re.sub(r'[^\\w\\s]', '', word)\n for i in range(0, len(word) - 1):\n if word[i] in vowelList:\n count += 1\n if count > 14:\n return 1\n else:\n return 0\n\n\ndef checkLetterQ(sentence):\n words = sentence.split()\n for word in words:\n word = re.sub(r'[^\\w\\s]', '', word)\n for letter in word:\n if letter == 'q':\n return 1\n return 0\n\n\ndef checkWordLength1(sentence):\n words = sentence.split()\n for word in words:\n if len(word) == 1:\n return 1\n return 0\n\n\ndef adaBoost(df, estimators):\n unqEstimators = set()\n df['initial_weights'] = 1 / len(df)\n target_entropy = entropy_weight(df)\n attribute_entropy = get_entropy_attributes_weight(df)\n best_attribute = df.columns[get_best_attribute_weights(attribute_entropy, target_entropy, unqEstimators, df)]\n unqEstimators.add(best_attribute)\n index = 1\n adaTree = {}\n for i in range(0, estimators):\n best_attribute = decision_stump(df, best_attribute, index, adaTree, unqEstimators)\n unqEstimators.add(best_attribute)\n return adaTree\n\n\ndef decision_stump(df, attribute, index, adaTree, unqEstimators):\n classify1, classify2, correct_predictions, incorrect_predictions = classify(df, attribute)\n total_weight = 0\n for index, row in df.iterrows():\n if index in incorrect_predictions:\n total_weight += row[-1]\n significance = get_significance(total_weight)\n new_weights = get_new_weights(df, significance, correct_predictions, incorrect_predictions)\n df['new_weights' + str(index)] = normalize(new_weights)\n adaTree[attribute] = {}\n adaTree[attribute][1] = classify1\n adaTree[attribute][0] = classify2\n adaTree[attribute]['significance'] = significance\n next_best_attribute = get_nextBestAttr(df, incorrect_predictions, attribute, unqEstimators)\n return next_best_attribute\n\n\ndef get_best_attribute_weights(attribute_entropy, target_entropy, unqEstimators, df):\n best_attribute = 0\n max_infoGain = 0\n index = 0\n for att_ent 
in attribute_entropy:\n if target_entropy - att_ent > max_infoGain:\n if df.columns[index] not in unqEstimators:\n best_attribute = index\n max_infoGain = target_entropy - att_ent\n index += 1\n\n return best_attribute\n\n\ndef entropy_weight(df):\n weightEn = 0\n weightNl = 0\n totalWeight = 0\n for index, row in df.iterrows():\n totalWeight += row[-1]\n\n if row['Language'] == 'en':\n weightEn += row[-1]\n else:\n weightNl += row[-1]\n probEn = weightEn / totalWeight\n probNl = weightNl / totalWeight\n return -probEn * math.log2(probEn) - probNl * math.log2(probNl)\n\n\ndef get_entropy_attributes_weight(df):\n attribute_entropy = []\n columns = ['WordCount>5.1', 'LetterEFreq', 'LetterNFreq', 'The?', 'De?',\n 'RepeatLetters', 'IJ?', 'VowelCount>14', 'Q?', 'Len1Words']\n for col in columns:\n count1 = 0\n count2 = 0\n count3 = 0\n count4 = 0\n totalWeight = 0\n for index, row in df.iterrows():\n totalWeight += row[-1]\n if row[col] == 1 and row['Language'] == 'en':\n count1 += row[-1]\n elif row[col] == 0 and row['Language'] == 'en':\n count2 += row[-1]\n elif row[col] == 1 and row['Language'] == 'nl':\n count3 += row[-1]\n elif row[col] == 0 and row['Language'] == 'nl':\n count4 += row[-1]\n prob1 = count1 / totalWeight\n prob2 = count2 / totalWeight\n prob3 = count3 / totalWeight\n prob4 = count4 / totalWeight\n f1 = len(df[col][df[col] == 1]) / len(df)\n f2 = len(df[col][df[col] == 0]) / len(df)\n attribute_entropy.append(abs(\n -f1 * (-prob1 * math.log2(prob1 + delta) - prob3 * math.log2(prob3 + delta)) - f2 * (\n -prob2 * math.log2(prob2 + delta)\n - prob4 * math.log2(prob4 + delta))))\n return attribute_entropy\n\n\ndef get_nextBestAttr(df, incorrect_predictions, attribute, unqEstimators):\n target_entropy = entropy_weight(df)\n attribute_entropy = get_entropy_attributes_weight(df)\n best_attribute = df.columns[get_best_attribute_weights(attribute_entropy, target_entropy, unqEstimators, df)]\n return best_attribute\n\n\ndef normalize(new_weights):\n normalized_weights = []\n Sum = sum(new_weights)\n for weight in new_weights:\n normalized_weights.append(weight / Sum)\n return normalized_weights\n\n\ndef get_new_weights(df, significance, correct_predictions, incorrect_predictions):\n new_weights = []\n for index, row in df.iterrows():\n if index in correct_predictions:\n new_weights.append(row[-1] * math.exp(-significance))\n elif index in incorrect_predictions:\n new_weights.append(row[-1] * math.exp(significance))\n return new_weights\n\n\ndef get_significance(total_weight):\n return (1 / 2) * math.log((1 - total_weight) / total_weight)\n\n\ndef classify(df, attribute):\n countEn_pos = 0\n countEn_neg = 0\n countNl_pos = 0\n countNl_neg = 0\n incorrect_predictions = set()\n correct_predictions = set()\n for index, row in df.iterrows():\n if row[attribute] == 1 and row['Language'] == 'en':\n countEn_pos += 1\n elif row[attribute] == 1 and row['Language'] == 'nl':\n countNl_pos += 1\n elif row[attribute] == 0 and row['Language'] == 'en':\n countEn_neg += 1\n elif row[attribute] == 0 and row['Language'] == 'nl':\n countNl_neg += 1\n if countEn_pos > countNl_pos:\n classify1 = 'en'\n else:\n classify1 = 'nl'\n if countEn_neg > countNl_neg:\n classify2 = 'en'\n else:\n classify2 = 'nl'\n for index, row in df.iterrows():\n if row[attribute] == 1 and row['Language'] == classify1:\n correct_predictions.add(index)\n elif row[attribute] == 1 and row['Language'] != classify1:\n incorrect_predictions.add(index)\n elif row[attribute] == 0 and row['Language'] == classify2:\n 
correct_predictions.add(index)\n elif row[attribute] == 0 and row['Language'] != classify2:\n incorrect_predictions.add(index)\n return classify1, classify2, correct_predictions, incorrect_predictions\n\n\nmain()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":14170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"335103122","text":"# import pygame, sys\n# from pygame.locals import *\n# from math import *\n\n# pygame.init()\n\n# ancho, alto = 1360,730\n# ventana = pygame.display.set_mode((ancho,alto))\n# ventana.fill(0xFF3c1F)\n# pygame.display.set_caption(\"Prueba\")\n# superficie = pygame.Surface((40,20), pygame.SRCALPHA, 32)\n# superficie.fill(0xFFFFFF)\n# pie = pygame.image.load(\"pie.png\")\n# superficie.blit(pie,(0,0))\n\n# r_90 = pygame.transform.rotate(superficie,90)\n# r_30 = pygame.transform.rotate(superficie,30)\n# r_0 = pygame.transform.rotate(superficie,0)\n# r_180 = pygame.transform.rotate(superficie,180)\n# r_270 = pygame.transform.rotate(superficie,270)\n# r_45 = pygame.transform.rotate(superficie,45)\n\n# def update(superficie):\n# \tescala = 40\n# \talfa = superficie.get_alpha()\n# \tprint alfa\n# \tif alfa - escala > 0:\n# \t\tsuperficie.set_alpha(alfa - escala)\n\n# while True:\n# \tventana.fill(0xFFFFFF)\n\n# \tfor event in pygame.event.get():\n# \t\tif event.type == QUIT:\n# \t\t\tpygame.quit()\n# \t\t\tsys.exit()\n# \t\telif event.type == pygame.KEYDOWN:\n# \t\t\tif event.key == K_RIGHT:\n# \t\t\t\tupdate(r_45)\n\n# \t# ventana.blit(r_90,(0,0))\n# \t# ventana.blit(r_30,(0,0))\n# \t# ventana.blit(r_0,(0,0))\n# \t# ventana.blit(r_180,(0,0))\n# \t# ventana.blit(r_270,(0,0))\n# \tventana.blit(r_45,(0,0))\n\n\n\n# \tpygame.display.update()\nimport pygame\nimport time\n\ndef blit_alpha(target, source, location, opacity):\n\tx = location[0]\n\ty = location[1]\n\ttemp = pygame.Surface((source.get_width(), source.get_height())).convert()\n\ttemp.blit(target, (-x, -y))\n\ttemp.blit(source, (0, 0))\n\ttemp.set_alpha(opacity) \n\ttarget.blit(temp, location)\n\n\npygame.init()\nscreen = pygame.display.set_mode((300, 300))\ndone = False\n\nhappy = pygame.image.load('pie_2.png') # our happy blue protagonist\ncheckers = pygame.image.load('DSCN4596.JPG') # 32x32 repeating checkered image\ni = 255\nwhile not done:\n\tstart = time.time()\n\t# pump those events!\n\tfor e in pygame.event.get():\n\t\tif e.type == pygame.QUIT:\n\t\t\tdone = True\n\t# checker the background\n\tscreen.fill(0xFF0000)\n\t# here comes the protagonist\n\tif i <= 0:\n\t\ti = 255\n\tblit_alpha(screen, happy, (100, 100), i)\n\ti -=10\n\n\tpygame.display.flip()\n\n\t# yeah, I know there's a pygame clock method\n\t# I just like the standard threading sleep\n\tend = time.time()\n\tdiff = end - start\n\tframerate = 30\n\tdelay = 1.0 / framerate - diff\n\tif delay > 0:\n\t\ttime.sleep(delay)","sub_path":"imagen.py","file_name":"imagen.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"648238253","text":"import math\nimport sys\n\nfrom PIL import Image, ImageDraw\n\n\nMODE = \"RGBA\"\nBGCOLOR = \"#FFFFFF\"\nDOTCOLOR = \"#ddd\"\nLINECOLOR = \"#111111\"\nDARK = \"#000000\"\nPRIMES = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101,\n 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199]\n\n\ndef draw_dots(draw, offset_x=0, offset_y=0):\n 
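# Added sketch, with toy numbers, of the AdaBoost update implemented by the train.py
# record above (get_significance / get_new_weights / normalize): significance is
# 0.5*ln((1-err)/err); misclassified weights grow by exp(+significance), correct ones
# shrink by exp(-significance), and the weights are then renormalized.
import math

err = 0.25                                   # weighted error of the current stump
alpha = 0.5 * math.log((1 - err) / err)      # ~0.5493
weights = [0.25, 0.25, 0.25, 0.25]
correct = [True, True, True, False]          # last sample misclassified
weights = [w * math.exp(-alpha if ok else alpha) for w, ok in zip(weights, correct)]
s = sum(weights)
weights = [w / s for w in weights]
print([round(w, 3) for w in weights])        # [0.167, 0.167, 0.167, 0.5]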
DOTS.clear()\n for y in range(GRID):\n for x in range(GRID):\n distance = (SIZE - 1) / (GRID - 1)\n gridx = (x * distance) + offset_x\n gridy = (y * distance) + offset_y\n draw.point((gridx, gridy), fill=DOTCOLOR)\n DOTS.append([gridx, gridy])\n\n\ndef draw_lines(draw, snake_chars):\n for i, character in enumerate(snake_chars):\n if character == '1':\n selected = BIT_GRID[i]\n # print(\"drawing the grid line from %s, %s\" % selected)\n gridax = DOTS[selected[0]][0]\n griday = DOTS[selected[0]][1]\n gridbx = DOTS[selected[1]][0]\n gridby = DOTS[selected[1]][1]\n xy = (gridax, griday, gridbx, gridby)\n # print(\"guessing coords %s, %s, %s, %s\\n\" % xy)\n draw.line(xy, fill=LINECOLOR, width=1, joint=None)\n\n\ndef single_image_output(snake_chars, snake_id, trans=False):\n snake_png = \"snake_%s.png\" % snake_id\n # print(\"snake img %s\" % snake_png)\n im = init_image(MODE, [SIZE, SIZE], BGCOLOR, trans=trans)\n draw = get_draw(im)\n draw_dots(draw)\n # print(\"snake chars to draw is: %s\" % snake_chars)\n draw_lines(draw, snake_chars)\n im.save(snake_png, \"PNG\")\n\n\ndef append_output(snake_chars, offset_x, offset_y, canvas_draw):\n draw_dots(canvas_draw, offset_x, offset_y)\n # print(\"snake chars to draw is: %s\" % snake_chars)\n draw_lines(canvas_draw, snake_chars)\n\n\ndef get_next_dimensions(total_word_length, can_x, snake_id):\n page_margin_x = 2\n page_margin_y = 2\n vertical_line_margin = 10\n horizontal_char_margin = 5\n horizontal_word_margin = 10\n # x props\n num_spaces = len([c for c in CHARS if c == 0])\n num_chars = len([c for c in CHARS if c == 1])\n start_position_x = page_margin_x + (num_spaces * horizontal_word_margin) + (\n num_chars * (SIZE + horizontal_char_margin))\n word_len_x = total_word_length * (SIZE + horizontal_char_margin)\n # print(\"page_margin_x: %s; page_margin_y: %s; vertical_line_margin: %s; horizontal_char_margin: %s; horizontal_word_margin: %s\" % (page_margin_x, page_margin_y, vertical_line_margin, horizontal_char_margin, horizontal_word_margin))\n # print(\"num_spaces: %s; num_chars: %s; start_position_x: %s; word_len_x: %s\" % (num_spaces, num_chars, start_position_x, word_len_x))\n if snake_id == 0:\n # print(\"snake 0\")\n # do we have enough space in this line to write this whole word; if not, advance now to next line\n if (start_position_x + word_len_x) >= (can_x + page_margin_x):\n # new line\n LINES.append(1)\n CHARS.clear()\n # reset x props\n num_spaces = len([c for c in CHARS if c == 0])\n num_chars = len([c for c in CHARS if c == 1])\n start_position_x = page_margin_x + (num_spaces * horizontal_word_margin) + (\n num_chars * (SIZE + horizontal_char_margin))\n offset_y = page_margin_y + (len(LINES) * (SIZE + vertical_line_margin))\n offset_x = start_position_x\n CHARS.append(1)\n if snake_id == (total_word_length - 1):\n # append a space\n # print(\"append space\")\n CHARS.append(0)\n return offset_x, offset_y\n\n\ndef loop_snakes(val, trans=False, canvas_draw=None, can_x=0):\n binstr = \"{0:b}\".format(val)[::-1] # reversed\n total_word_length = math.ceil(len(binstr) / BITS)\n # for each BITS characters starting from the left: (the resulting snake will be little-endian)\n snake_id = 0\n while True:\n # print(\"snake id %s\" % snake_id)\n # print(\"binstr is now %s\" % binstr)\n break_out = False\n if len(binstr) < BITS:\n snake_chars = binstr\n break_out = True\n else:\n snake_chars, binstr = binstr[:BITS], binstr[BITS:]\n if canvas_draw is None:\n single_image_output(snake_chars, snake_id, trans=trans)\n else:\n offset_x, offset_y = 
get_next_dimensions(total_word_length, can_x, snake_id)\n append_output(snake_chars, offset_x, offset_y, canvas_draw)\n snake_id += 1\n if break_out:\n break\n\n\ndef draw_snakes_main(words, trans=False, single_word_images=True, can_x=0, can_y=0):\n canvas_draw = None\n can_im = None\n if single_word_images is False:\n # print(\"dsm: single word\")\n can_im = init_image(MODE, [can_x, can_y], BGCOLOR, trans=trans)\n canvas_draw = get_draw(can_im)\n for word in words.split():\n val = calculate_word(word)\n loop_snakes(val, trans=trans, canvas_draw=canvas_draw, can_x=can_x)\n if single_word_images is False:\n can_im.save(\"canvas.png\", \"PNG\")\n\n\ndef init_image(mode, size, bgcolor, trans=False):\n im = Image.new(mode, size, bgcolor)\n if trans:\n trans = [(255, 255, 255, 0) for _ in range(size[0] * size[1])]\n im.putdata(trans)\n return im\n\n\ndef get_draw(im):\n return ImageDraw.Draw(im)\n\n\ndef calculate_word(word):\n \"\"\"The mathematical value of word is:\n\n nth character: take the nth prime and raise it to the power of char(n) of word; a=1, z=26\n\n \"\"\"\n val = 1\n for i, character in enumerate(word):\n val = val * (PRIMES[i] ** (ord(character.upper()) - 64))\n return val\n\n\ndef decode(binary):\n binary = int(binary[::-1], 2)\n print(\"got %s\" % binary)\n for i, prime in enumerate(PRIMES):\n j = 0\n while True:\n if binary < 3:\n break\n elif binary % prime == 0:\n binary = binary // prime\n j += 1\n else:\n break\n print(\"character %s is letter %s\" % (i, j))\n if binary < 3:\n break\n\n\ndef main(word, single_word_images, can_x, can_y, trans):\n if trans == 1:\n trans = True\n else:\n trans = False\n if single_word_images == 1:\n # print(\"single word images!\")\n single_word_images = True\n else:\n # print(\"single big canvas!\")\n single_word_images = False\n draw_snakes_main(word, trans=trans, single_word_images=single_word_images, can_x=can_x, can_y=can_y)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 7:\n raise Exception(\"draw.py word single_word_images(1|0) can_x(0|255) can_y(0|255) trans(0|1) grid(6|x)\")\n\n word, single_word_images, can_x, can_y, trans, GRID = sys.argv[1], int(sys.argv[2]), int(sys.argv[3]), int(sys.argv[4]), int(sys.argv[5]), int(sys.argv[6])\n\n BITS = GRID * (GRID - 1) * 2\n DISTANCE = 3\n SIZE = (GRID - 1) * (DISTANCE) + 1\n\n BIT_GRID = []\n for i in range(GRID * 2 - 1):\n linebase = (i // 2) * GRID\n if i % 2 == 0:\n # output a \"horizontal\" point grid of size n - 1\n for j in range(GRID - 1):\n BIT_GRID.append((linebase + j, linebase + j + 1));\n else:\n # output a \"vertical\" point grid of size n\n for j in range(GRID):\n BIT_GRID.append((linebase + j, linebase + j + GRID))\n # print(BITS)\n DOTS = []\n CHARS = []\n LINES = []\n\n main(word, single_word_images, can_x, can_y, trans)\n","sub_path":"draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":7533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"541045605","text":"birthday={'naveen':'10-08-1997','siri':'15-12-1998','bin':'04-05-1999'}\n\nwhile True:\n\tprint(\"Enter name of your friend\")\n\tname=input()\n\tif name=='':\n\t\tbreak\n\tif name in birthday:\n\t\tprint(birthday[name]+'I have found')\n\telse:\n\t\tprint(\"Enter the bday of new friend\")\n\t\tbday=input()\n\t\tbirthday[name]=bday\n\t\tprint(\"Successfuly updated friends 
bday\")\nprint(birthday)\n\t\t\n","sub_path":"friendbday.py","file_name":"friendbday.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"45120106","text":"import sys, os, gzip\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nhardStatFileName, softStatFileName, plotFileName = sys.argv[1:]\n\ndef readStatMeansFromFile(statFileName, numSubWins=11):\n first=True\n with open(statFileName, \"rt\") as statFile:\n for line in statFile:\n if first:\n header = line.strip().split()\n statSums = [0]*len(header)\n statTots = [0]*len(header)\n first = False\n else:\n statvals = [float(x) for x in line.strip().split()]\n for i in range(len(statvals)):\n statSums[i] += statvals[i]\n statTots[i] += 1\n\n statMeans = {}\n for i in range(len(header)):\n statName = header[i].split(\"_win\")[0]\n if not statName in statMeans:\n statMeans[statName] = [0]*numSubWins\n\n for i in range(len(header)):\n statName, win = header[i].split(\"_win\")\n win = int(win)\n statMeans[statName][win] = statSums[i]/statTots[i]\n return statMeans\n\ndef plotBigStats(stats, statNames, titles, colors, markers, plotFileName):\n fig, ax = plt.subplots(1, 2, figsize=(10, 5))\n for sweepTypeIndex in range(2):\n for i in range(len(statNames)):\n print(sweepTypeIndex, statNames[i], stats[sweepTypeIndex][statNames[i]])\n ax[sweepTypeIndex].plot(list(range(-5,6)), stats[sweepTypeIndex][statNames[i]], color=colors[i], lw=1, marker=markers[i], label=statNames[i])\n plt.setp(ax[sweepTypeIndex].get_xticklabels(), fontsize=14)\n plt.setp(ax[sweepTypeIndex].get_yticklabels(), fontsize=14)\n ax[sweepTypeIndex].set_xlabel(\"Distance from sweep\", fontsize=14)\n ax[sweepTypeIndex].set_ylabel(\"Relative value of statistic\", fontsize=14)\n ax[sweepTypeIndex].set_xticks(list(range(-5,6)))\n ax[sweepTypeIndex].set_xticklabels([\"\"]*5 + [\"0\"] + [\"\"]*5)\n ax[sweepTypeIndex].legend()\n ax[sweepTypeIndex].set_ylim((0, 0.15))\n ax[sweepTypeIndex].set_title(titles[sweepTypeIndex])\n fig.tight_layout()\n fig.savefig(plotFileName)\n\ncolors = ['black','red','blue','violet','orange','cyan','gray','brown']\nmarkers = ['o', 'v', '^', 'x', 's', '+', 'D', '']\nstatsToPlot = 'pi tajD fayWuH maxFDA HapCount ZnS Omega'.split()\n\nhardStatMeans = readStatMeansFromFile(hardStatFileName)\nsoftStatMeans = readStatMeansFromFile(softStatFileName)\nplotBigStats([hardStatMeans, softStatMeans], statsToPlot, [\"Hard Sweep\", \"Soft Sweep\"], colors, markers, plotFileName)\n","sub_path":"sweepPipeline/plotStatMeans.py","file_name":"plotStatMeans.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"288531473","text":"from random import randint, seed\nfrom sys import argv\n\nf1 = open(argv[1], \"w\")\nf2 = open(argv[2], \"w\")\n\n#seed(0)\n\nn = 102\nf1.write(str(n))\nf1.write(\"\\n\")\nf2.write(str(n))\nf2.write(\"\\n\")\n\nv = []\nfor i in range(n):\n v.append(randint(3, 1e9))\n f1.write(str(v[i]))\n f1.write(\"\\n\")\n\nr = []\nfor i in range(n):\n r.append(v[(i-1)%n] + v[(i)%n] + v[(i+1)%n])\n f2.write(str(r[i]))\n f2.write(\"\\n\")\n\n\n","sub_path":"homeworks/hw3/kemija/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"336975045","text":"from itertools import permutations\ndef solution(arr):\n n = len(arr)\n 
m = n//2\n idx = [2*e+1 for e in range(m)] \n arr = [e if i%2 else int(e) for i, e in enumerate(arr)]\n permlist = list(permutations(idx))\n answer = []\n for perm in permlist:\n temp = arr[:]\n n = len(perm)\n j = 0\n while j < n:\n idx = perm[j]\n a = temp[idx-1]\n b = temp[idx+1]\n operation = temp[idx]\n if operation == \"+\":\n temp = temp[:idx-1]+[a+b]+temp[idx+2:]\n else :\n temp = temp[:idx-1]+[a-b]+temp[idx+2:]\n perm = [e-2 if e > idx else e for e in perm]\n j+= 1\n answer.append(temp[0])\n\n print(answer)\n return max(answer)\n\n\narr =[\"1\", \"-\", \"3\", \"+\", \"5\", \"-\", \"8\"]\nresult=solution(arr)\nprint(result)","sub_path":"20xx_kakao_maestro/arithmetic/arithmetic.py","file_name":"arithmetic.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"547699571","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jun 05 13:39:23 2017\r\n\r\n@author: anand\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 16 18:20:33 2017\r\n\r\n@author: anand\r\n\"\"\"\r\n\r\n#%%:Header Files and Base Directory\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport lasagne\r\nimport theano\r\nimport cPickle as pickle\r\n\r\n#%% load data\r\nT = theano.tensor\r\nL = lasagne.layers\r\n\r\ntrainS = np.load(\"trainV.npy\")\r\ntrainB = np.load(\"trainBG.npy\")\r\ntestS = np.load(\"testV.npy\")\r\ntestB = np.load(\"testBG.npy\")\r\n\r\n\r\ndata = T.ftensor4()\r\nlabels = T.ivector()\r\n\r\n\r\n#%% define network\r\n# Define the actual network layer-by-layer\r\n\r\n\r\nactivation = lasagne.nonlinearities.rectify\r\nw = lasagne.init.GlorotNormal('relu')\r\nB = lasagne.init.Constant(0.001)\r\n\r\nnetwork = L.InputLayer(shape=(None,1,49,49), input_var=data)\r\n\r\nnetwork = L.Conv2DLayer(network, num_filters=16, filter_size=3,nonlinearity = activation, W = w, b = B)\r\nnetwork = L.Conv2DLayer(network, num_filters=32, filter_size=3,nonlinearity = activation, W = w, b = B)\r\nnetwork = L.MaxPool2DLayer(network, pool_size=2)\r\n\r\nnetwork = L.Conv2DLayer(network, num_filters=32, filter_size=3,nonlinearity = activation, W = w, b = B)\r\nnetwork = L.Conv2DLayer(network, num_filters=32, filter_size=3,nonlinearity = activation, W = w, b = B)\r\nnetwork = L.MaxPool2DLayer(network, pool_size=2)\r\n\r\nnetwork = L.Conv2DLayer(network, num_filters=64, filter_size=3,nonlinearity = activation, W = w, b = B)\r\nnetwork = L.Conv2DLayer(network, num_filters=64, filter_size=3,nonlinearity = activation, W = w, b = B)\r\nnetwork = L.Conv2DLayer(network, num_filters=64, filter_size=3,nonlinearity = activation, W = w, b = B)\r\nnetwork = L.MaxPool2DLayer(network, pool_size=3)\r\n\r\nnetwork = L.Conv2DLayer(network, num_filters=128, filter_size=1,nonlinearity = activation)\r\n\r\nnetwork = L.dropout(network, p=0.5)\r\n\r\nnetwork = L.DenseLayer(network, num_units=12, nonlinearity=lasagne.nonlinearities.softmax)\r\n\r\nn_params = L.count_params(network, trainable=True)\r\nprint('Network defined with {} trainable parameters'.format(n_params))\r\n\r\n#%% Objective and sybolic Functions to call the network on training and test data respectively\r\ndef objectives(deterministic):\r\n global network, labels\r\n predictions = L.get_output(network, deterministic=deterministic)\r\n \r\n #print(predictions,labels)\r\n \r\n loss = lasagne.objectives.categorical_crossentropy(predictions, labels).mean()\r\n loss += 0.0001 * lasagne.regularization.regularize_network_params(network, 
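# --- added sketch: independent brute force for the search above ---
# solution() enumerates operator-application orders by rewriting the token
# list in place and re-shifting the permutation indices. The compact version
# below (my names, not the record's) tracks the surviving operator indices
# instead, and can be used to cross-check its result:
from itertools import permutations

def max_by_order(tokens):
    nums = [int(t) for t in tokens[::2]]
    ops = tokens[1::2]
    best = float("-inf")
    for order in permutations(range(len(ops))):
        vals, remaining = list(nums), list(range(len(ops)))
        for op_i in order:
            pos = remaining.index(op_i)   # position among surviving ops
            a, b = vals.pop(pos), vals.pop(pos)
            vals.insert(pos, a + b if ops[op_i] == "+" else a - b)
            remaining.pop(pos)
        best = max(best, vals[0])
    return best

assert max_by_order(["1", "-", "3", "+", "5", "-", "8"]) == 1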
lasagne.regularization.l2)\r\n \r\n accuracy = T.mean(T.eq(T.argmax(predictions, axis=1), labels), dtype=theano.config.floatX)\r\n #accuracy = T.mean(lasagne.objectives.categorical_accuracy(predictions, labels, top_k=1))\r\n return loss, accuracy\r\n\r\ntrain_loss, train_accuracy = objectives(deterministic=False)\r\nparams = L.get_all_params(network, trainable=True)\r\nupdates = lasagne.updates.sgd(train_loss, params, learning_rate=0.0001)\r\n\r\ntrain = theano.function(inputs=[data, labels], outputs=[train_loss, train_accuracy], \r\n updates=updates, allow_input_downcast=True)\r\ntest = theano.function(inputs=[data, labels], outputs=objectives(deterministic=True), \r\n allow_input_downcast=True)\r\n\r\n#%% ALL-DATA\r\n#label Spine = 1\r\n#label background = 0\r\n\r\n\r\n#labels train+test\r\ntrainS_l = trainS[:,-1]\r\ntrainB_l = trainB[:,-1]\r\ntestS_l = testS[:,-1]\r\ntestB_l = testB[:,-1]\r\n\r\n\r\n#Data train+test\r\ntrainS = trainS[:,0:trainS.shape[1]-1].reshape((trainS.shape[0],1,49,49))\r\ntrainB = trainB[:,0:trainB.shape[1]-1].reshape((trainB.shape[0],1,49,49))\r\ntestS = testS[:,0:testS.shape[1]-1].reshape((testS.shape[0],1,49,49))\r\ntestB = testB[:,0:testB.shape[1]-1].reshape((testB.shape[0],1,49,49))\r\n\r\n\r\n#Indices train_test\r\n\r\ntrainS_i = range(trainS.shape[0]) # for N slices, make a list [0, 1, 2, ..., N-1]\r\ntrainB_i = range(trainB.shape[0])\r\ntestS_i = range(testS.shape[0])\r\ntestB_i= range(testB.shape[0])\r\n\r\n\r\n#%%train in minbatch\r\nepochs = 10\r\nminibatch_size = 252\r\ndef iterate_in_minibatches(name, fn, slices_0, slices_1,labels_0, labels_1, indices_0, indices_1):\r\n global minibatch_size\r\n\r\n dataset_size = min(len(indices_0), len(indices_1)) # N samples of class 0 and M of class 1 -> get the smaller number\r\n\r\n # Instead of shuffling the actual data (the slices), we just shuffle a list of indices (much more efficient!)\r\n # If you have enough data, you can also just use random minibatches, so just pick N random samples each time.\r\n np.random.shuffle(indices_0)\r\n np.random.shuffle(indices_1)\r\n\r\n performance = []\r\n \r\n # Walk from 0 to dataset_size in steps of half a minibatch (because half a minibatch will be 0, the other half 1)\r\n for mb_start in xrange(0, dataset_size, minibatch_size / 2):\r\n mb_data = np.concatenate((slices_0[indices_0[mb_start:mb_start+(11*minibatch_size/12)],:,:,:],\r\n slices_1[indices_1[mb_start:mb_start+minibatch_size/12],:,:,:]),axis=0)\r\n \r\n \r\n #print mb_data.shape\r\n mb_labels = np.concatenate((labels_0[indices_0[mb_start:mb_start+(11*minibatch_size/12)]],\r\n labels_1[indices_1[mb_start:mb_start+minibatch_size/12]]),axis=0)\r\n mb_labels = mb_labels.astype(np.int32)\r\n \r\n #print mb_labels.shape\r\n #print mb_labels.dtype\r\n \r\n # Turn [xy, xy, n*2] (normal x,y,z format) into [n*2, 1, xy, xy] (theano format, 4D tensor)\r\n if mb_data.shape[0] != minibatch_size:\r\n break # skip incomplete minibatches (shouldn't happen, but just to be sure...)\r\n\r\n \r\n performance.append(fn(mb_data, mb_labels))\r\n\r\n # We got one value for the loss and one for the accuracy for each minibatch. 
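# --- added sketch: the flat-row unpacking used above, in isolation ---
# Each dataset row stores a flattened 49x49 patch followed by its label in
# the last column; the slicing and reshaping in the "ALL-DATA" block reduces
# to the following (toy array, layout assumed as in the record):
import numpy as np

rows = np.random.rand(10, 49 * 49 + 1).astype(np.float32)
labels = rows[:, -1].astype(np.int32)
patches = rows[:, :-1].reshape((rows.shape[0], 1, 49, 49))
assert patches.shape == (10, 1, 49, 49) and labels.shape == (10,)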
Let's take the average over all\r\n # minibatches and display that:\r\n performance = np.asarray(performance).mean(axis=0)\r\n print(' > {}: loss = {} ; accuracy = {}'.format(name, performance[0], performance[1]))\r\n \r\n return performance\r\n\r\n#%%\r\n# First train the network, then test it on the data that was not used for training, then repeat\r\nfor epoch in xrange(1, epochs + 1):\r\n print('Epoch {}/{}'.format(epoch, epochs))\r\n iterate_in_minibatches('Training', train, trainS, trainB, trainS_l, trainB_l, trainS_i, trainB_i)\r\n iterate_in_minibatches('Testing', test, testS, testB, testS_l, testB_l, testS_i, testB_i)\r\n\r\nprint('Training complete!')\r\n\r\n#%%\r\n\r\nnetinfo = {'network': network,'params': L.get_all_param_values(network)}\r\nf = open('network.pkl','wb')\r\npickle.dump(netinfo,f,protocol = pickle.HIGHEST_PROTOCOL )\r\n\r\n","sub_path":"Project Codes-UMC/Thoracic/thoracic_network.py","file_name":"thoracic_network.py","file_ext":"py","file_size_in_byte":6589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"132048070","text":"#!/usr/bin/env python\nimport os\nimport zipfile\nimport errno\nimport io\nfrom lxml import etree\n\nimport docopt\nfrom exercises_server_api import ExercisesServerSession\n\nUSAGE = '''\nUsage:\n client.py read ( [])...\n client.py list\n\nOptions:\n read []: a list of ids and optional seeds can be given. If no\n seeds are specified, the template zip is returned, else an instance of the\n template is returned.\n list: returns a list of IDs available on the server.\n'''\n\n\ndef mkdir_p(path):\n ''' mkdir -p functionality\n from:\n http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python\n '''\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\ndef write_template(templatezip, _id, _seed):\n '''\n Do required post processing of the template zip object and write to disk\n '''\n\n outputfolder = os.path.join('template', \"{}-{}\".format(_id[0:8], _seed))\n # make that folder if it does not exist\n mkdir_p(outputfolder)\n zf = io.BytesIO(templatezip)\n zipfile.ZipFile(zf).extractall(outputfolder)\n\n # read the xml file\n with open(os.path.join(outputfolder, 'main.xml')) as xml:\n mainxml = etree.XML(xml.read())\n\n\n elements_to_remove = ['response', 'hint']\n for elem in elements_to_remove:\n for thiselem in mainxml.findall(\".//{}\".format(elem)):\n thiselem.getparent().remove(thiselem)\n\n with open(os.path.join(outputfolder, 'main.xml'), 'w') as out:\n out.write(etree.tostring(mainxml, encoding='utf-8',\n xml_declaration=True))\n\n\nif __name__ == \"__main__\":\n arguments = docopt.docopt(USAGE)\n server = arguments['']\n port = arguments['']\n session = ExercisesServerSession('{}:{}'.format(server, port), auth=None,\n verify=False)\n alltemplates = session.list('testing')\n # just list all the template IDs\n if arguments['list']:\n for template in alltemplates:\n print(template['id'])\n\n # get a template from the server\n if arguments['read']:\n all_ids = arguments['']\n if arguments['']:\n all_seeds = arguments['']\n else:\n all_seeds = [None for i in all_ids]\n\n for _id, _seed in zip(all_ids, all_seeds):\n\n # if we're asking for a full id, just ask the server for it\n\n if len(_id) > 8:\n template = session.read(_id, 'testing', _seed)\n else:\n # if we're asking for a short id, first get the list of ids\n # from the server then select the correct one\n template = [t for 
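# --- added sketch: the class-balanced minibatch indexing, stand-alone ---
# iterate_in_minibatches() above fills each batch with 11/12 background and
# 1/12 spine samples and skips incomplete tails. The generator below is a
# framework-free mirror of that index bookkeeping (names are mine):
import numpy as np

def balanced_batches(n_bg, n_fg, batch=252):
    bg_idx = np.random.permutation(n_bg)
    fg_idx = np.random.permutation(n_fg)
    bg_per, fg_per = 11 * batch // 12, batch // 12
    for start in range(0, min(n_bg, n_fg), batch // 2):
        bg = bg_idx[start:start + bg_per]
        fg = fg_idx[start:start + fg_per]
        if len(bg) + len(fg) == batch:
            yield bg, fg   # incomplete tail batches are skipped, as above

batches = list(balanced_batches(2000, 1000))
assert all(len(b) + len(f) == 252 for b, f in batches)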
t in alltemplates if t['id'].startswith(_id)]\n assert(len(template) == 1)\n template = template[0]\n longid = template['id']\n template = session.read(longid, 'testing', _seed)\n\n template = write_template(template, _id, _seed)\n","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"495796614","text":"import h5py\nfrom PIL import Image\nimport numpy as np\nimport os\nimport glob\nfrom matplotlib import pyplot as plt\nfrom scipy.ndimage.filters import gaussian_filter \n\n#this is borrowed from https://github.com/davideverona/deep-crowd-counting_crowdnet\ndef gaussian_filter_density_fix(gt):\n density = np.array(gt, dtype=np.float32)\n sigma = 15\n density = gaussian_filter(density, sigma, mode='constant')\n return density\n\nsave_path = './data/'\nos.makedirs(save_path, exist_ok=True)\nshape = (1024, 1024)\nres = 8\nn_max = 50\n\nfor i in range(220):\n print('generate', i) \n n = np.random.randint(n_max)\n name = \"img_{:03}\".format(i)\n x = np.random.randint(shape[0], size=n)\n y = np.random.randint(shape[1], size=n)\n m = np.zeros(shape)\n m[x, y] = 1\n\n small_m = np.zeros((shape[0] // res, shape[1] // res))\n small_m[x // 8, y // 8] = 1\n\n d = gaussian_filter_density_fix(m)\n with h5py.File(save_path + name, 'w') as hf:\n hf['map'] = small_m\n hf['density'] = d\n","sub_path":"decon/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"248579350","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2012 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All Rights Reserved.\n#\n# Copyright 2012 Nebula, Inc.\n# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
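# --- added sketch: the point-map -> density construction from build.py ---
# build.py drops ones at head positions and blurs them into a density map;
# away from the borders the blur preserves total mass, so the integral still
# counts the points. Same scipy call the record wraps, on a toy map (note:
# the record imports the older scipy.ndimage.filters path; the import below
# is the current location):
import numpy as np
from scipy.ndimage import gaussian_filter

m = np.zeros((128, 128), dtype=np.float32)
m[[50, 64, 80], [60, 64, 70]] = 1
density = gaussian_filter(m, sigma=15, mode="constant")
assert abs(density.sum() - 3.0) < 0.1   # mass preserved away from borders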
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nViews for managing floating IPs.\n\"\"\"\nimport json\n\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.core.urlresolvers import reverse\nfrom django.core import urlresolvers\nfrom django.http import HttpResponse\nfrom django.utils import log\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import View\n\nfrom horizon import exceptions\nfrom horizon import forms\nfrom horizon import workflows\nfrom horizon import tables\nfrom horizon import tabs\nfrom horizon.utils import memoized\n\nfrom openstack_dashboard import api\nfrom openstack_dashboard.api import network\nfrom openstack_dashboard.api import nova\nfrom openstack_dashboard.api import service_support\nfrom openstack_dashboard.usage import quotas\nfrom openstack_dashboard.utils import floatingip_check\nfrom openstack_dashboard.utils import price_handle\n\nfrom openstack_dashboard.dashboards.network.\\\n floating_ips import forms as project_forms\nfrom openstack_dashboard.dashboards.network.\\\n floating_ips import workflows as project_workflows\nfrom openstack_dashboard.dashboards.network.\\\n floating_ips import tabs as project_tabs\nfrom openstack_dashboard.dashboards.network.\\\n floating_ips import tables as project_tables\nfrom openstack_dashboard.utils import user_type\nfrom openstack_dashboard.utils import adjust_plan\nfrom openstack_dashboard.utils.concurrent import ConcurrentEval\n\nfrom openstack_dashboard.dashboards.network.models import SharedBandwidthFIP\nfrom openstack_dashboard.dashboards.network.models import ResourcePayType\n\nLOG = log.getLogger(__name__)\n\nSTATUS_DISPLAY_CHOICES = {\n # for dispalying \"status\"\n \"ACTIVE\": _(\"Active\"),\n \"Active\": _(\"Active\"),\n \"BUILD\": _(\"Build\"),\n \"Build\": _(\"Build\"),\n \"SUSPENDED\": _(\"Suspended\"),\n \"Suspended\": _(\"Suspended\"),\n \"DOWN\": _(\"Services Down\"),\n \"Down\": _(\"Services Down\"),\n \"ERROR\": _(\"Error\"),\n \"Error\": _(\"Error\"),\n}\n\nDEVICE_OWNER_LB = \"neutron:LOADBALANCERV2\"\nDEVICE_OWNER_INSTANCE = \"compute:None\"\n\n\nclass IndexView(tables.DataTableView):\n table_class = project_tables.FloatingIPsTable\n template_name = 'network/floating_ips/index.html'\n\n @memoized.memoized_method\n def get_data(self):\n\n # get floating ips\n def get_floatingips():\n try:\n floating_ips = network.tenant_floating_ip_list(self.request)\n except Exception:\n floating_ips = []\n exceptions.handle(self.request,\n ('Unable to retrieve floating IP addresses.'))\n return floating_ips\n\n # get instances\n def get_instances():\n instances = []\n try:\n instances, has_more, has_prev_data = nova.server_list(\n self.request)\n except Exception:\n exceptions.handle(self.request,\n _('Unable to retrieve instance list.'))\n return instances\n\n # get all routers\n def get_routers():\n routers = []\n try:\n routers = api.neutron.router_list(request=self.request)\n except Exception:\n exceptions.handle(self.request,\n _('Unable to retrieve router list.'))\n return routers\n\n # get all loadbalancers\n def get_loadbalancers():\n loadbalancers = []\n try:\n loadbalancers, more, prev = \\\n api.lbaasv2.loadbalancer_list(self.request)\n except Exception:\n exceptions.handle(self.request,\n _('Unable to retrieve loadbalancers list.'))\n\n return loadbalancers\n\n def set_floatingip_order_info(floatingip):\n try:\n floatingip.order_info = 
api.service_support.get_orders(\n self.request, floatingip.id)\n except Exception:\n pass\n\n func_list = [(get_floatingips, []), (get_instances, []),\n (get_routers, []), (get_loadbalancers, [])]\n floating_ips, instances, routers, loadbalancers = ConcurrentEval(\n func_list).return_val()\n\n instances_dict = dict([(obj.id, obj.name) for obj in instances])\n routers_dict = dict([(obj.id, obj.name) for obj in routers])\n lbs_dict = dict([(obj.id, obj.name) for obj in loadbalancers])\n\n def _set_floatingip_attr(ip):\n try:\n port = api.neutron.port_get(request=self.request,\n port_id=ip.floating_port_id)\n except Exception as e:\n exceptions.handle(request=self.request, message=_(\" %s\") % e)\n port = None\n\n if port.get('device_owner') == \"network:router_gateway\":\n ip.is_gateway_port = True\n else:\n ip.is_gateway_port = False\n\n if port:\n if instances_dict.get(ip.instance_id):\n ip.instance_name = instances_dict.get(ip.instance_id)\n view = \"horizon:compute:instances:detail\"\n ip.device_link = urlresolvers.reverse(\n view, args=(ip.instance_id,))\n ip.binded_device_id = ip.instance_id\n ip.device_type = _('Instance')\n elif routers_dict.get(port.device_id):\n ip.instance_name = routers_dict.get(port.device_id)\n view = \"horizon:network:routers:detail\"\n ip.device_link = urlresolvers.reverse(\n view, args=(port.device_id,))\n ip.binded_device_id = port.device_id\n ip.device_type = _(\"Router\")\n elif lbs_dict.get(ip.instance_id):\n ip.instance_name = lbs_dict.get(ip.instance_id)\n view = \"horizon:network:loadbalancersv2:detail\"\n ip.device_link = urlresolvers.reverse(\n view, args=(ip.instance_id,))\n ip.binded_device_id = ip.instance_id\n ip.device_type = _('Load Balancer')\n else:\n ip.device_link = None\n ip.binded_device_id = None\n\n func_list = [(_set_floatingip_attr, [floatingip]) for\n floatingip in floating_ips]\n\n if not user_type.check_user_type(self.request):\n func_list += [(set_floatingip_order_info, [floatingip]) for\n floatingip in floating_ips]\n\n ConcurrentEval(func_list).return_val()\n\n shared_fips = dict([[fip.floatingip_id, fip.shared_bw_type] for fip in\n SharedBandwidthFIP.objects.all()])\n\n for fip in floating_ips:\n if fip.id in shared_fips.keys():\n fip.bandwidth = _(shared_fips[fip.id])\n\n return floating_ips\n\n def get_user_type(self):\n \"\"\"decision user's billing way:\n\n True:quota billing\n False:flexible billing\n by request.user.services_region.owner field\n \"\"\"\n return user_type.check_user_type(self.request)\n\n def get_context_data(self, **kwargs):\n context = super(IndexView, self).get_context_data(**kwargs)\n if hasattr(self, \"table\"):\n context[\"user_type\"] = self.get_user_type()\n return context\n\n\nclass AdjustFloatingIPBandwidthView(forms.ModalFormView):\n form_class = project_forms.AdjustBandwidth\n success_url = 'horizon:network:floating_ips:index'\n failure_url = 'horizon:network:floating_ips:index'\n template_name = 'network/floating_ips/adjust.html'\n\n min_bandwidth = 1\n max_bandwidth = 100\n step_bandwidth = 1\n bandwidth = min_bandwidth\n\n def get_bandwidth_info(self):\n \"\"\"get some info of ip bandwidth\n\n :return {}.fromkeys([id,name,min,max,step,value])\n note:Not the final!\n \"\"\"\n return {\n 'id': 'id_adjust_bandwidth',\n 'name': 'bandwidth',\n 'min': self.min_bandwidth,\n 'max': self.max_bandwidth,\n 'step': self.step_bandwidth,\n 'value': self.bandwidth\n }\n\n def get_user_type(self):\n \"\"\"decision user's billing way:\n\n True:quota billing\n False:flexible billing\n by 
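# --- added sketch: a stdlib stand-in for ConcurrentEval ---
# The views above call ConcurrentEval([(fn, args), ...]).return_val() to run
# independent API lookups in parallel and collect results in order. I am
# inferring that contract from the call sites, not from ConcurrentEval's
# source; concurrent.futures gives the same shape:
from concurrent.futures import ThreadPoolExecutor

def concurrent_eval(func_list):
    with ThreadPoolExecutor(max_workers=len(func_list) or 1) as pool:
        futures = [pool.submit(fn, *args) for fn, args in func_list]
        return [f.result() for f in futures]

vals = concurrent_eval([(sorted, [[3, 1, 2]]), (len, ["abc"])])
assert vals == [[1, 2, 3], 3]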
request.user.services_region.owner field\n \"\"\"\n return user_type.check_user_type(self.request)\n\n def get_context_data(self, **kwargs):\n context = super(AdjustFloatingIPBandwidthView, self).\\\n get_context_data(**kwargs)\n try:\n context['dragbar'] = self.get_bandwidth_info()\n context['user_type'] = self.get_user_type()\n context['floatingip'] = self.kwargs['ip_id']\n context[\"official_user\"] = user_type.is_user_official(\n self.request)\n context['usages'] = quotas.tenant_quota_floatingip_usages(\n self.request)\n context['quota_available'] = context['usages'][\n 'bandwidth']['available']\n if not context[\"official_user\"]:\n context['quota_deadline'] = \\\n api.service_support.get_quotadeadline(\n self.request)[\"quotaDeadline\"]\n except Exception as e:\n LOG.error(\"Retrieve floating infomation error : %s\" % e)\n exceptions.handle(self.request)\n return context\n\n def get_success_url(self):\n return reverse(self.success_url,)\n\n def get_object(self):\n try:\n networks = api.neutron.subnet_get(self.request,\n self.kwargs['ip_id'])\n except Exception as e:\n networks = []\n msg = _('Network list can not be retrieved.')\n LOG.error(\"retrieve networks error: %s\" % e)\n exceptions.handle(self.request, msg)\n\n return networks\n\n def get_initial(self):\n ret_data = {'ip_id': self.kwargs['ip_id']}\n floating_ip = network.tenant_floating_ip_get(\n self.request,\n floating_ip_id=self.kwargs['ip_id'])\n bandwidth = floating_ip.get('bandwidth', None)\n if not self.get_user_type():\n order_info = service_support.get_orders(self.request,\n self.kwargs['ip_id'])\n ret_data['product_type'] = order_info['product']['name']\n if bandwidth:\n self.bandwidth = bandwidth\n return ret_data\n\n\nclass AssociateFloatingIPRouteView(forms.ModalFormView):\n form_class = project_forms.AssociateRouter\n success_url = 'horizon:network:floating_ips:index'\n failure_url = 'horizon:network:floating_ips:index'\n template_name = 'network/floating_ips/associaterouters.html'\n\n def get_context_data(self, **kwargs):\n context = super(AssociateFloatingIPRouteView, self).get_context_data(\n **kwargs)\n context['floatingip'] = self.kwargs['ip_id']\n return context\n\n def get_success_url(self):\n\n return reverse(self.success_url,)\n\n def get_initial(self):\n floating_ip = api.network.tenant_floating_ip_get(self.request,\n self.kwargs['ip_id'])\n return {\"IP_addr\": floating_ip.ip,\n 'ip_id': self.kwargs['ip_id'],\n 'network_id': floating_ip['pool']}\n\n\nclass AssociateView(workflows.WorkflowView):\n workflow_class = project_workflows.IPAssociationWorkflow\n\n def get_initial(self):\n if self.request.method == 'GET':\n floating_ip = network.tenant_floating_ip_get(\n request=self.request,\n floating_ip_id=self.request.GET.get('ip_id'))\n\n return {\"ip_address\": floating_ip.floating_ip_address,\n \"ip_id\": self.request.GET.get('ip_id')}\n else:\n floating_ip = self.request.POST.get('ip_id')\n return {\"ip_id\": floating_ip}\n\n\nclass AssociateFloatingIPLBView(AssociateView):\n workflow_class = project_workflows.AssociateFloatingIPLBWorkflow\n\n def get_initial(self):\n if self.request.method == 'GET':\n floating_ip = network.tenant_floating_ip_get(\n request=self.request,\n floating_ip_id=self.kwargs['ip_id']\n )\n return {\"ip_address\": floating_ip.floating_ip_address,\n \"ip_id\": self.kwargs['ip_id']}\n\n\nclass fip_targets(View):\n\n def populate_target_ids_choices(self, request, instance_id):\n floating_ip_targets = {}\n if not instance_id:\n return floating_ip_targets\n\n def get_targets():\n 
return api.network.floating_ip_target_list_by_instance(\n request, instance_id=instance_id)\n\n def get_floatingip_list():\n return api.network.tenant_floating_ip_list(request)\n\n con = ConcurrentEval([(get_targets, []), (get_floatingip_list, [])])\n\n targets, floating_ip_list = con.return_val()\n\n for target in targets:\n fixed_ip_id, fix_ip = target.split('_')\n flag = False\n for floating_ip in floating_ip_list:\n if fix_ip == floating_ip.fixed_ip_address:\n flag = True\n break\n if flag:\n continue\n if not floatingip_check.is_subnet_reachable_to_public(request,\n fixed_ip_id):\n continue\n floating_ip_targets.update({target: fix_ip})\n return floating_ip_targets\n\n def post(self, request, *args, **kwargs):\n content_type = \"application/json\"\n try:\n selected_instance = json.loads(request.body)\n except Exception as e:\n LOG.error(\"Parse json message error, body : %s,\"\n \"exception :%s\" % (request.body, e))\n selected_instance = None\n if not selected_instance or not isinstance(selected_instance, dict):\n response = {}\n else:\n response = self.populate_target_ids_choices(request,\n selected_instance.get(\n 'target', None))\n response = json.dumps(response)\n return HttpResponse(response, content_type)\n\n\nclass AllocateView(forms.ModalFormView):\n form_class = project_forms.FloatingIpAllocate\n template_name = 'network/floating_ips/allocate.html'\n success_url = reverse_lazy('horizon:network:floating_ips:index')\n product_types = ['fixedBandwidth', 'floatBandwidth']\n products = []\n product_res = []\n min_bandwidth = 1\n max_bandwidth = 100\n step_bandwidth = 1\n bandwidth = min_bandwidth\n\n def get_user_type(self):\n \"\"\"decision user's billing way:\n\n True:quota billing\n False:flexible billing\n by request.user.services_region.owner field\n \"\"\"\n return user_type.check_user_type(self.request)\n\n def get_bandwidth_info(self):\n \"\"\"get some info of ip bandwidth\n\n :return {}.fromkeys([id,name,min,max,step,value])\n note:Not the final!\n \"\"\"\n return {\n 'id': 'id_bandwidth_slider',\n 'name': 'bandwidth',\n 'min': self.min_bandwidth,\n 'max': self.max_bandwidth,\n 'step': self.step_bandwidth,\n 'value': self.bandwidth\n }\n\n def get_product_types(self):\n \"\"\"get product types of bandwidth\n\n :return one or all of\n [{'value':'fixedBandwidth','display':_('fixedBandwidth')},\n {'value':'floatBandwidth','display':_('floatBandwidth')}]\n \"\"\"\n self.products = []\n products_on_server = price_handle.get_products_list(self.request)\n\n for product_check in products_on_server:\n spts = ResourcePayType.objects.filter(pay_type=product_check)\n if len(spts):\n # only the floating product will be show\n self.products.append(product_check)\n\n mapping = getattr(settings, 'SHARED_BANDWIDTH_SUBNET_MAPPING', {})\n self.products.extend(mapping.keys())\n\n ret_value = []\n\n for product in self.products:\n spts = ResourcePayType.objects.filter(pay_type=product)\n subnet_id_pay_type_list = []\n for spt in spts:\n subnet_id_pay_type_list.append(\n spt.resource_id + \"_\" + spt.pay_type)\n if subnet_id_pay_type_list:\n product_info = {\n 'value': product,\n 'subnet_id': \"__\".join(subnet_id_pay_type_list),\n 'display': spt.pay_type_alias\n }\n ret_value.append(product_info)\n for name, subnet_id in mapping.iteritems():\n product_info = {\n 'value': name,\n 'subnet_id': subnet_id + \"_\" + name,\n 'display': name}\n ret_value.append(product_info)\n\n return ret_value\n\n def get_billing_ways(self):\n \"\"\"get billing ways of a product\n\n :return if success:\n [\n {\n 
'product_type': xx,\n 'billing_ways':[\n {\n 'value': xx,\n 'display': xx\n },\n ]\n },\n ]\n else:\n []\n Note:\n must be called after call get_product_types()\n \"\"\"\n const_billing_way_map = {\n '0': _('numeric'),\n '1': _('product'),\n '2': _('peak'),\n '3': _('peak-prepaid'),\n '4': _('traffic'),\n '5': _('traffic-prepaid'),\n }\n ret_value = []\n try:\n for product in self.products:\n product_items = price_handle.get_product_items_list(\n self.request,\n product)\n for product_item in product_items:\n if product_item in ['ip']:\n continue\n product_item_types = price_handle.\\\n get_produc_item_types(self.request,\n product,\n product_item)\n ret_item = {\n 'product_type': product,\n 'billing_ways': []\n }\n for item_type in product_item_types:\n ret_item['billing_ways'].append({\n 'value': item_type,\n 'display': const_billing_way_map.get(\n item_type, item_type),\n })\n ret_value.append(ret_item)\n return ret_value\n except Exception as e:\n LOG.error('get billing ways failed,message:%s,products:%s' %\n (e.message, self.products, ))\n return []\n\n def get_billing_periods(self):\n \"\"\"get flexible billing way\n\n :return if success:\n [\n {\n 'product_type': product,\n 'billing_way': item_type,\n 'periods': [\n {\n 'value': period,\n 'display': _(period),\n }\n ]\n },\n ]\n else:\n []\n Note:\n must be called after call get_product_types()\n \"\"\"\n ret_value = []\n try:\n for product in self.products:\n product_items = price_handle.get_product_items_list(\n self.request,\n product)\n for product_item in product_items:\n if product_item in ['ip']:\n continue\n product_item_types = price_handle.\\\n get_produc_item_types(self.request,\n product,\n product_item)\n for item_type in product_item_types:\n periods = price_handle.get_specific_item_period(\n self.request,\n product,\n product_item,\n item_type\n )\n ret_item = {\n 'product_type': product,\n 'billing_way': item_type,\n 'periods': []\n }\n for period in periods:\n ret_item['periods'].append({\n 'value': period,\n 'display': _(period),\n })\n ret_value.append(ret_item)\n return ret_value\n except Exception as e:\n LOG.error('get billing ways failed,message:%s,products:%s' %\n (e.message, self.products, ))\n return []\n\n def get_object_display(self, obj):\n return obj.ip\n\n def get_initial(self):\n # get product from bss ,we need to sorted our subnet base\n # on the product\n self.product_res = self.get_product_types()\n return {\"product_type_list\": self.product_res}\n\n def get_context_data(self, **kwargs):\n context = super(AllocateView, self).get_context_data(**kwargs)\n try:\n context['usages'] = quotas.tenant_quota_floatingip_usages(\n self.request)\n context['user_type'] = self.get_user_type()\n if not context['user_type']:\n context['product_types'] = self.product_res\n context['billing_ways'] = self.get_billing_ways()\n context['billing_periods'] = self.get_billing_periods()\n context['dragbar'] = self.get_bandwidth_info()\n context[\"official_user\"] = user_type.is_user_official(self.request)\n if not context[\"official_user\"]:\n context['quota_fip_available'] = context['usages'][\n 'floating_ips']['available']\n context['quota_bandwidth_available'] = context['usages'][\n 'bandwidth']['available']\n\n context['quota_deadline'] = \\\n api.service_support.get_quotadeadline(\n self.request)[\"quotaDeadline\"]\n except Exception as e:\n LOG.error(\"Retrieve floating ip infomation error:\"\n \" %s\" % e)\n exceptions.handle(self.request)\n\n return context\n\n\nclass 
AdjustPlanFormViewBandwidth(adjust_plan.AdjustPlanFormView):\n template_name = 'network/floating_ips/adjust_plan.html'\n success_url = reverse_lazy('horizon:network:floating_ips:index')\n\n def get_context_data(self, **kwargs):\n context = super(AdjustPlanFormViewBandwidth, self).get_context_data(\n **kwargs)\n context['adjustplan_success_url'] = (\n 'horizon:network:floating_ips:adjust_plan')\n return context\n\n\nclass IPDetailView(tabs.TabView):\n tab_group_class = project_tabs.IPDetailTabs\n template_name = 'network/floating_ips/detail.html'\n redirect_url = 'horizon:network:floating_ips:index'\n\n def get_context_data(self, **kwargs):\n context = super(IPDetailView, self).get_context_data(**kwargs)\n context[\"floating_ip\"] = self.get_data()\n return context\n\n @memoized.memoized_method\n def get_data(self):\n ip_id = self.kwargs['ip_id']\n try:\n floating_ip = api.network.tenant_floating_ip_get(self.request,\n ip_id)\n packaged_floating_ip = floating_ip\n packaged_floating_ip.associate_resource = None\n packaged_floating_ip.url_info = None\n is_as_gw_port = api.neutron.FloatingIpManager(\n request=self.request).is_as_gw_port(fip=packaged_floating_ip)\n if is_as_gw_port:\n router = api.neutron.router_get(\n self.request,\n packaged_floating_ip.gw_router_id)\n packaged_floating_ip.associate_resource = router.name\n elif packaged_floating_ip.device_owner == DEVICE_OWNER_INSTANCE:\n instance = api.nova.server_get(\n self.request,\n packaged_floating_ip.instance_id)\n packaged_floating_ip.associate_resource = instance.name\n elif packaged_floating_ip.device_owner == DEVICE_OWNER_LB:\n lb = api.lbaasv2.loadbalancer_get(\n self.request,\n packaged_floating_ip.instance_id)\n packaged_floating_ip.associate_resource = lb.name\n else:\n packaged_floating_ip.url_info = [\n {\n 'name': _(\"Associate to instance\"),\n 'url': reverse(\n 'horizon:network:floating_ips:associate') + (\n '?ip_id=' + str(packaged_floating_ip.id))\n },\n {\n 'name': _('Allocate Route To Floating IP'),\n 'url': reverse(\n 'horizon:network:floating_ips:associateroute',\n args=[packaged_floating_ip.id])\n },\n {\n 'name': _(\"Associate to load balancer\"),\n 'url': reverse(\n 'horizon:network:floating_ips:'\n 'associateloadbalancer',\n args=[packaged_floating_ip.id])\n },\n ]\n packaged_floating_ip.status = STATUS_DISPLAY_CHOICES.get(\n packaged_floating_ip.status)\n except Exception:\n redirect = reverse(self.redirect_url)\n exceptions.handle(self.request,\n _('Unable to retrieve'\n ' details for floating_ip %s.') % ip_id,\n redirect=redirect)\n raise exceptions.Http302(redirect)\n\n is_shared_fip = False\n shared_fip = SharedBandwidthFIP.objects.filter(\n floatingip_id=floating_ip.id).first()\n if shared_fip:\n is_shared_fip = True\n floating_ip.bandwidth = _(shared_fip.shared_bw_type)\n floating_ip.is_shared_fip = is_shared_fip\n\n return floating_ip\n\n def get_tabs(self, request, *args, **kwargs):\n floating_ip = self.get_data()\n return self.tab_group_class(request, floating_ip=floating_ip, **kwargs)\n","sub_path":"horizon/openstack_dashboard/dashboards/network/floating_ips/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":28119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"55802041","text":"from time import sleep\nfrom SimpleCV import Image\n\nJACOPO_PATH = '/Users/dcollins/Desktop/SimpleCV/book/Chapter4/jacopo.png'\n\nimg = Image(JACOPO_PATH)\n\n# Crop starting at +(50, 5)+ for an area 200 pixels wide by 200 pixels tall\ncropImg = 
img.crop(50, 5, 200, 200)\n#cropImg.show()\n\n# Crop starting at image center\ncropImg = img.crop(img.width / 2, img.height / 2, 200, 200, centered=True)\n#cropImg.show()\n\n# Crop by finding blobs\nblobs = img.findBlobs()\nimg.crop(blobs[-1]).show() # get the largest blob, last in list\nblobs[-1].crop().show() # equivalent to above call\n\n# Crop by slicing image\ncropImg = img[50:250, 5:205]\ncropImg.show()\n\nsleep(3)","sub_path":"chapter4/image_cropping.py","file_name":"image_cropping.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"614229128","text":"\r\nimport os\r\ntry:\r\n from setuptools import setup\r\n from setuptools import Extension\r\nexcept ImportError:\r\n from distutils.core import setup\r\n from distutils.extension import Extension\r\ntry:\r\n from Cython.Distutils import build_ext\r\nexcept ImportError:\r\n from distutils.command.build_ext import build_ext\r\nfrom distutils.errors import CCompilerError, DistutilsExecError, \\\r\n DistutilsPlatformError\r\nimport platform\r\n\r\n\r\nclass TXEntension(build_ext):\r\n # This class allows C extension building to fail.\r\n def run(self):\r\n try:\r\n build_ext.run(self)\r\n except DistutilsPlatformError:\r\n raise Exception(\"BuildFailed\")\r\n\r\n def build_extension(self, ext):\r\n try:\r\n build_ext.build_extension(self, ext)\r\n except (CCompilerError, DistutilsExecError, DistutilsPlatformError):\r\n pass # raise BuildFailed()\r\n\r\n\r\ncmdclass = {}\r\next_modules = []\r\n\r\ninstall_requires = ['flexx >= 0.4.1',\r\n 'future',\r\n 'dataset == 0.8']\r\n\r\n\r\nreadthedocs = os.environ.get('READTHEDOCS') == 'True'\r\n\r\nif not readthedocs:\r\n try:\r\n ext_modules += [\r\n Extension(\"abce.trade\", [\"abce/trade.pyx\"]),\r\n Extension(\"abce.logger.online_variance\", [\"abce/logger/online_variance.pyx\"]),\r\n ]\r\n cmdclass.update({'build_ext': TXEntension})\r\n except ImportError:\r\n ext_modules += [\r\n Extension(\"abce.trade\", [\"abce/trade.c\"]),\r\n Extension(\"abce.logger.online_variance\", [\"abce/logger/online_variance.c\"]),\r\n ]\r\n\r\n if not platform.python_implementation() == \"PyPy\":\r\n install_requires += ['numpy >= 1.10.2p']\r\n if ('APPVEYOR' not in os.environ) or ('TRAVIS' not in os.environ):\r\n install_requires += ['pandas >= 0.17.1',\r\n 'bokeh == 0.12.16',\r\n 'tornado == 4.3']\r\n\r\n\r\nversion = '0.9.5b0'\r\n\r\n\r\nsetup(name='abce',\r\n version=version,\r\n author='Davoud Taghawi-Nejad',\r\n author_email='Davoud@Taghawi-Nejad.de',\r\n description='Agent-Based Complete Economy modelling platform',\r\n url='https://github.com/AB-CE/abce.git',\r\n package_dir={'abce': 'abce',\r\n 'abce.gui': 'abce/gui',\r\n 'abce.agents': 'abce/agents',\r\n 'abce.contracts': 'abce/contracts',\r\n 'abce.logger': 'abce/logger',\r\n },\r\n packages=['abce'],\r\n long_description=open('README.rst').read(),\r\n install_requires=install_requires,\r\n include_package_data=True,\r\n ext_modules=ext_modules,\r\n cmdclass=cmdclass)\r\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"384792768","text":"\"\"\"\r\n\r\nThis is the Entry point for Training the Machine Learning Model.\r\n\r\n\"\"\"\r\n# Doing the necessary imports\r\nfrom sklearn.model_selection import train_test_split\r\nfrom data_ingestion import data_loader\r\nfrom data_preprocessing import preprocessing\r\nfrom 
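# --- added sketch: the SimpleCV crops above, translated to Pillow ---
# SimpleCV is Python-2 era; its img.crop(x, y, w, h) corner form and the
# centered=True variant map onto Pillow's (left, top, right, bottom) box
# tuple as below. This assumes the record's 'jacopo.png' is present; the
# translation is mine, not part of the record:
from PIL import Image

img = Image.open("jacopo.png")
corner = img.crop((50, 5, 50 + 200, 5 + 200))                  # x, y, w, h
cx, cy = img.width // 2, img.height // 2
centered = img.crop((cx - 100, cy - 100, cx + 100, cy + 100))  # centered=True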
data_preprocessing import clustering\r\nfrom best_model_finder import tuner\r\nfrom file_operations import file_methods\r\nfrom application_logging import logger\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n# Creating the common Logging object\r\n\r\n\r\nclass trainModel:\r\n\r\n def __init__(self):\r\n self.log_writer = logger.App_Logger()\r\n self.file_object = open(\"Training_Logs/ModelTrainingLog.txt\", 'a+')\r\n\r\n def trainingModel(self):\r\n # Logging the start of Training\r\n self.log_writer.log(self.file_object, 'Start of Training')\r\n try:\r\n # Getting the data from the source\r\n data_getter = data_loader.Data_Getter(\r\n self.file_object, self.log_writer)\r\n data = data_getter.get_data()\r\n\r\n \"\"\"doing the data preprocessing\"\"\"\r\n\r\n preprocessor = preprocessing.Preprocessor(\r\n self.file_object, self.log_writer)\r\n data = preprocessor.remove_columns(data, ['policy_number', 'policy_bind_date', 'policy_state', 'insured_zip', 'incident_location', 'incident_date', 'incident_state',\r\n 'incident_city', 'insured_hobbies', 'auto_make', 'auto_model', 'auto_year', 'age', 'total_claim_amount']) # remove the column as it doesn't contribute to prediction.\r\n # replacing '?' with NaN values for imputation\r\n data.replace('?', np.NaN, inplace=True)\r\n\r\n # check if missing values are present in the dataset\r\n is_null_present, cols_with_missing_values = preprocessor.is_null_present(\r\n data)\r\n\r\n # if missing values are there, replace them appropriately.\r\n if (is_null_present):\r\n data = preprocessor.impute_missing_values(\r\n data, cols_with_missing_values) # missing value imputation\r\n # encode categorical data\r\n data = preprocessor.encode_categorical_columns(data)\r\n\r\n # create separate features and labels\r\n X, Y = preprocessor.separate_label_feature(\r\n data, label_column_name='fraud_reported')\r\n\r\n \"\"\" Applying the clustering approach\"\"\"\r\n\r\n # object initialization.\r\n kmeans = clustering.KMeansClustering(\r\n self.file_object, self.log_writer)\r\n # using the elbow plot to find the number of optimum clusters\r\n number_of_clusters = kmeans.elbow_plot(X)\r\n\r\n # Divide the data into clusters\r\n X = kmeans.create_clusters(X, number_of_clusters)\r\n\r\n # create a new column in the dataset consisting of the corresponding cluster assignments.\r\n X['Labels'] = Y\r\n\r\n # getting the unique clusters from our dataset\r\n list_of_clusters = X['Cluster'].unique()\r\n\r\n \"\"\"parsing all the clusters and looking for the best ML algorithm to fit on individual cluster\"\"\"\r\n\r\n for i in list_of_clusters:\r\n # filter the data for one cluster\r\n cluster_data = X[X['Cluster'] == i]\r\n\r\n # Prepare the feature and Label columns\r\n cluster_features = cluster_data.drop(\r\n ['Labels', 'Cluster'], axis=1)\r\n cluster_label = cluster_data['Labels']\r\n\r\n # splitting the data into training and test set for each cluster one by one\r\n x_train, x_test, y_train, y_test = train_test_split(\r\n cluster_features, cluster_label, test_size=1 / 3, random_state=355)\r\n # Proceeding with more data pre-processing steps\r\n x_train = preprocessor.scale_numerical_columns(x_train)\r\n x_test = preprocessor.scale_numerical_columns(x_test)\r\n\r\n model_finder = tuner.Model_Finder(\r\n self.file_object, self.log_writer) # object initialization\r\n\r\n # getting the best model for each of the clusters\r\n best_model_name, best_model = model_finder.get_best_model(\r\n x_train, y_train, x_test, y_test)\r\n\r\n # saving the best model to the 
directory.\r\n file_op = file_methods.File_Operation(\r\n self.file_object, self.log_writer)\r\n save_model = file_op.save_model(\r\n best_model, best_model_name+str(i))\r\n\r\n # logging the successful Training\r\n self.log_writer.log(self.file_object, 'Successful End of Training')\r\n self.file_object.close()\r\n\r\n except Exception as e:\r\n # logging the unsuccessful Training\r\n self.log_writer.log(\r\n self.file_object, 'Unsuccessful End of Training')\r\n self.file_object.close()\r\n raise Exception\r\n","sub_path":"trainingModel.py","file_name":"trainingModel.py","file_ext":"py","file_size_in_byte":5054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"503854846","text":"import logging\n\nlogging.basicConfig(format='%(asctime)s-%(levelname)s-%(name)s - %(message)s')\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\nLIBRARY_KEY_MAPPING = {\n 'BAYS': 'BASSM',\n 'BSA': 'BBSA',\n 'LUBL': 'BIAUL',\n 'ZADAR': 'BICUAZ',\n 'SCHW': 'BLDMV',\n 'WINCK': 'BWINCK',\n 'ATHEN': 'DAIA',\n 'BONN': 'DAIB',\n 'DAMAS': 'DAID',\n 'EURAS': 'DAIE',\n 'RGK': 'DAIF',\n 'DAI': 'DAIG',\n 'ISTAN': 'DAII',\n 'KAIRO': 'DAIK',\n 'MADRD': 'DAIM',\n 'ORIEN': 'DAIO',\n 'PEK': 'DAIP',\n 'ROM': 'DAIR',\n 'SANAA': 'DAIS',\n 'TEHER': 'DAIT',\n 'ZENTR': 'DAIZ',\n 'DEIA': 'DEIA',\n 'DEIJ': 'DEIJ'\n}\n\n\ndef map_aleph_key(aleph_key):\n if aleph_key not in LIBRARY_KEY_MAPPING:\n logger.warning('No library key matches: \"' + aleph_key + '\"')\n return None\n return LIBRARY_KEY_MAPPING[aleph_key]\n","sub_path":"lib/mappings/library_keys.py","file_name":"library_keys.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"132286769","text":"\"\"\"empty message\n\nRevision ID: 53c91a5a28b6\nRevises: a9d2246b2a01\nCreate Date: 2016-10-17 18:43:39.963106\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '53c91a5a28b6'\ndown_revision = 'a9d2246b2a01'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('emailmessage')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
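# --- added sketch: the cluster-then-model pattern from trainingModel ---
# trainingModel.trainingModel() above clusters the features and tunes one
# model per cluster. Stripped of the project's helper classes, the control
# flow looks like this (toy data; scikit-learn assumed, seeds fixed so the
# snippet is deterministic):
import numpy as np
from sklearn.cluster import KMeans
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
X = rng.random((120, 4))
y = rng.integers(0, 2, size=120)
clusters = KMeans(n_clusters=3, n_init=10, random_state=0).fit_predict(X)
models = {}
for c in np.unique(clusters):
    mask = clusters == c
    models[c] = LogisticRegression().fit(X[mask], y[mask])  # one model/cluster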
###\n op.create_table('emailmessage',\n sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),\n sa.Column('email_type', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n sa.Column('message_recipient', mysql.VARCHAR(length=100), nullable=True),\n sa.Column('message_sender', mysql.VARCHAR(length=100), nullable=True),\n sa.Column('delivered', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True),\n sa.Column('date_delivered', mysql.DATETIME(), nullable=True),\n sa.Column('date_created', mysql.DATETIME(), nullable=True),\n sa.Column('message_subject', mysql.VARCHAR(length=255), nullable=True),\n sa.Column('notification_type_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=False),\n sa.ForeignKeyConstraint(['notification_type_id'], [u'confignotificationtype.id'], name=u'emailmessage_ibfk_1'),\n sa.PrimaryKeyConstraint('id'),\n mysql_default_charset=u'latin1',\n mysql_engine=u'InnoDB'\n )\n ### end Alembic commands ###\n","sub_path":"src/migrations/versions/53c91a5a28b6_.py","file_name":"53c91a5a28b6_.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"397014266","text":"import os\nfrom Core.ComponentList import *\nfrom Core.Dataset import Dataset\nfrom DataMC.NanoAOD.CrossSection import xs_dict\n\nfileName = \"SkimTree.root\"\n#common_path = \"/raid/raid7/lucien/SUSY/RPV/SkimTree/ZMuMu/2018-06-19/BkgMC_ZMuMuSelection_v1/\"\ncommon_path = \"/raid/raid7/kshi/SUSY/RPV/SkimTree/mc/TTbar/\"\ninUFTier2 = False\n#sumw_path = \"/raid/raid7/lucien/SUSY/RPV/SumGenWeight/NanoAOD_InclusiveSelection_v2/%s/EventWeight.root\"\nsumw_path = \"/raid/raid7/kshi/SUSY/RPV/sum_weight/%s/EventWeight.root\"\n\nsampleNames = [n for n in os.listdir(common_path) if os.path.isdir(os.path.join(common_path, n))]\nallMCSamples = []\nfor sampleName in sampleNames:\n if sampleName != \"DYJetsToLL_M50_LO\":\n tmpList = ComponentList([Component(sampleName,\"/\".join([common_path,sampleName,fileName]),\"Events\",inUFTier2,maxEvents=-1)],)\n tmpDataset = Dataset(sampleName,tmpList,xs=xs_dict[sampleName])\n tmpDataset.setSumWeight(sumw_path%sampleName)\n allMCSamples.append(tmpDataset)\n","sub_path":"RPV/SkimTree/NanoAOD/Run2016/TTbar_MC.py","file_name":"TTbar_MC.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"428155630","text":"import matplotlib.pyplot as plt\n\nimport pandas as pd\nimport numpy as np\n\ndf = pd.read_csv('VSD_res.csv', sep=';')\n\nfig, ax = plt.subplots(figsize=(12, 12))\n\nres_img = []\nfor f in sorted(df['f'].unique(), reverse=True):\n tmp = df.loc[df['f'] == f].sort_values(by='H')\n # print(tmp['H'])\n res_img.append(tmp['Vmix'].tolist())\n\nax.imshow(res_img)\nax.set_xlabel(\"H [A/m]\")\nax.set_ylabel(\"Vmix [V]\")\nax.set_title(\"VSD map frequency dispersion\")\nax.legend()\nfig.savefig(\"VSD.png\")","sub_path":"tests/vsd/vsd.py","file_name":"vsd.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"452414712","text":"# coding: UTF-8\n\n# クラスラベルIDからラベル名を取得\nID2LNAME = [\n \"vehicle\",\n \"animal\",\n \"in_the_air\",\n \"on_water\",\n \"on_ground\",\n \"wild_animal\",\n \"pet_animal\",\n \"with_wheel\"\n]\n\n# クラスラベルの種類数\nN_LABELS = len(ID2LNAME)\n\n# ラベル名からクラスラベルIDを取得\ndef LNAME2ID(label_name):\n for i in range(0, N_LABELS):\n if label_name == 
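# --- added sketch: the VSD map assembly as a single pivot ---
# The loop in the VSD script that groups rows by 'f', sorts each group by
# 'H' and appends the 'Vmix' lists is exactly a DataFrame pivot (toy frame
# standing in for VSD_res.csv):
import pandas as pd

df = pd.DataFrame({"f": [1e9, 1e9, 2e9, 2e9],
                   "H": [0, 100, 0, 100],
                   "Vmix": [0.1, 0.2, 0.3, 0.4]})
grid = (df.pivot(index="f", columns="H", values="Vmix")
          .sort_index(ascending=False))        # f descending, as in the loop
res_img = grid.to_numpy()
assert res_img.tolist() == [[0.3, 0.4], [0.1, 0.2]]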
ID2LNAME[i]:\n return i\n return -1\n","sub_path":"labels.py","file_name":"labels.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"203069890","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# author:wangjin mail: wjwcan10482@163.com\n\n\nimport requests\nimport time\nimport webbrowser\nimport re\n\nweb_page = 'http://10.51.20.82/images/'\nlast_update = None\n\n\nwhile True:\n all_info = requests.get(web_page)\n all_info.encoding = 'utf-8'\n title = re.findall('\"right\">20(.*?)', all_info.text, re.S)\n print(\"本次更新时间最大值:%s\" % max(title))\n cur_update = max(title)\n if not last_update:\n last_update = cur_update\n print(\"上次更新时间 %s\" % last_update)\n if last_update < cur_update:\n webbrowser.open(web_page)\n time.sleep(10) #10秒扫描一次,如果有更新就打开webbrower\n","sub_path":"class04/image_my.py","file_name":"image_my.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"122466024","text":"import pandas as pd\nimport numpy as np\nimport datetime\n\ndef textAnalysis(series):\n\tanalysis = {}\n\tminWordCount = float('inf')\n\tmaxWordCount = 0\n\ttotalWords = 0\n\twordCounts = {}\n\tsumOfWordLengths = 0\n\twordFrequencies = []\n\tfrequencyCount = 0\n\n\taverageWordsPerCell = 0\n\tminWordLength = float('inf')\n\tmaxWordLength = 0\n\n\tfor row in series:\n\t\tif pd.notnull(row):\n\t\t\twords = str(row).split()\n\t\t\tnumberOfWords = len(words)\n\t\t\tif numberOfWords < minWordCount:\n\t\t\t\tminWordCount = numberOfWords\n\t\t\tif numberOfWords > maxWordCount:\n\t\t\t\tmaxWordCount = numberOfWords\n\t\t\ttotalWords += numberOfWords\n\n\t\t\tfor word in words:\n\t\t\t\twordLength = len(word)\n\t\t\t\twordCounts[word] = wordCounts.get(word, 0) + 1\n\t\t\t\tsumOfWordLengths += wordLength\n\t\t\t\tif wordLength < minWordLength:\n\t\t\t\t\tminWordLength = wordLength\n\t\t\t\telif wordLength > maxWordLength:\n\t\t\t\t\tmaxWordLength = wordLength\n\n\taverageWordLength = sumOfWordLengths / totalWords if totalWords > 0 else 0 \n\taverageWordCount = totalWords / series.count() if series.count() > 0 else 0\n\n\tuniqueWords = 0\n\tmaxCount = 0\n\tmostProminentWords = []\n\tfor word, count in wordCounts.iteritems():\n\t\tuniqueWords += 1\n\t\tif count > maxCount:\n\t\t\tmaxCount = count\n\t\t\tmostProminentWords = [word]\n\t\t\tmaxCount = count\n\t\telif count == maxCount:\n\t\t\tmostProminentWords.append(word)\n\tfor w in sorted(wordCounts, key=wordCounts.get, reverse=True):\n\t\t\tif frequencyCount < 50:\n\t\t\t\twordFrequencies.append((w, wordCounts[w]))\n\t\t\t\tfrequencyCount += 1\n\t\t\telse:\n\t\t\t\tbreak\n\t#wordFrequencies = {k: wordCounts[k] for k in wordCounts.keys()[:50]}\n\n\n\tanalysis = {}\n\tanalysis[\"word_count_min\"] = minWordCount\n\tanalysis[\"word_count_max\"] = maxWordCount\n\tanalysis[\"word_count_average\"] = averageWordCount\n\tanalysis[\"word_length_min\"] = minWordLength\n\tanalysis[\"word_length_max\"] = maxWordLength\n\tanalysis[\"word_length_average\"] = averageWordLength\n\tanalysis[\"word_total\"] = totalWords\n\tanalysis[\"word_unique_count\"] = uniqueWords\n\tanalysis[\"word_mode\"] = mostProminentWords\n\tanalysis[\"word_mode_frequency\"] = maxCount\n\tanalysis[\"word_frequencies\"] = wordFrequencies\n\tanalysis[\"invalid\"] = series.isnull().sum()\n\tanalysis.update(genericAnalysis(series))\n\n\treturn analysis\n\ndef numericalAnalysis(series):\n\tif not(type(series) is pd.Series 
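# --- added sketch: labels.py's reverse lookup without the scan ---
# LNAME2ID() above walks ID2LNAME linearly; a dict built once gives the same
# mapping in O(1) (same list as the record, -1 fallback preserved; the
# helper names below are mine):
ID2LNAME = ["vehicle", "animal", "in_the_air", "on_water",
            "on_ground", "wild_animal", "pet_animal", "with_wheel"]
_LNAME2ID = {name: i for i, name in enumerate(ID2LNAME)}

def lname2id(label_name):
    return _LNAME2ID.get(label_name, -1)

assert lname2id("pet_animal") == 6 and lname2id("boat") == -1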
and issubclass(series.dtype.type, np.number)):\n\t\traise ValueError('dcs.analyze.numericalAnalysis takes number pandas.Series as parameter')\n\t\n\tanalysis = series.describe().to_dict()\n\tdel analysis[\"count\"]\n\tanalysis[\"range\"] = analysis[\"max\"] - analysis[\"min\"]\n\tanalysis.update(genericAnalysis(series))\n\tanalysis[\"invalid\"] = series.isnull().sum()\n\n\treturn analysis \n\ndef dateAnalysis(series):\n\tif not(type(series) is pd.Series and issubclass(series.dtype.type, np.datetime64)):\n\t\traise ValueError('dcs.analyze.dateAnalysis takes datetime pandas.Series as parameter')\n\n\tanalysis = genericAnalysis(series)\n\tif 'mode' in analysis:\n\t\tanalysis['mode'] = [datetime.datetime.strftime(x, \"%Y-%m-%dT%H:%M:%SZ\") for x in analysis['mode']]\n\n\tanalysis['frequencies'] = [(datetime.datetime.strftime(value, \"%Y-%m-%dT%H:%M:%SZ\"), count) for (value, count) in analysis['frequencies']]\n\n\tsortedDates = series[series.notnull()].sort_values()\n\tif len(sortedDates) > 0:\n\t\tminimum = sortedDates.iloc[0]\n\t\tmaximum = sortedDates.iloc[-1]\n\t\tmedian = sortedDates.iloc[len(sortedDates) / 2] if len(sortedDates) % 2 == 1 else sortedDates.iloc[(len(sortedDates) / 2) - 1] + (sortedDates.iloc[len(sortedDates) / 2] - sortedDates.iloc[(len(sortedDates) / 2) - 1]) / 2\n\n\tanalysis[\"invalid\"] = series.isnull().sum()\n\tanalysis[\"max\"] = datetime.datetime.strftime(maximum, \"%Y-%m-%dT%H:%M:%SZ\")\n\tanalysis[\"median\"] = datetime.datetime.strftime(median, \"%Y-%m-%dT%H:%M:%SZ\")\n\tanalysis[\"min\"] = datetime.datetime.strftime(minimum, \"%Y-%m-%dT%H:%M:%SZ\")\n\n\treturn analysis\n\n# Returns a dictionary with the following keys:\n#\t'unique_count' (number of unique values), 'frequencies' (list of value-frequency tuples), 'mode' (if mode exists), and 'mode_frequency' (if mode exists) \ndef genericAnalysis(series):\n\tcounts = series.value_counts()\n\tmostFrequentValues = []\n\tfrequencies = []\n\tfirstCount = None\n\tfor value, count in counts.iteritems():\n\t\tif firstCount is None:\n\t\t\tfirstCount = count\n\t\t\n\t\tif count is firstCount:\n\t\t\tmostFrequentValues.append(value)\n\n\t\tfrequencies.append((value, count))\n\n\ttoReturn = {'unique_count' : len(counts)}\n\ttoReturn['frequencies'] = frequencies\n\t\n\tif (len(mostFrequentValues) is 1 or len(mostFrequentValues) < toReturn[\"unique_count\"]) and len(mostFrequentValues) > 0:\n\t\ttoReturn['mode'] = mostFrequentValues\n\t\ttoReturn['mode_frequency'] = firstCount\n\n\treturn toReturn\n\ndef analysisForColumn(df, column):\n\tseries = df[column]\n\tanalysis = {}\n\tif issubclass(series.dtype.type, np.number):\n\t\tanalysis = numericalAnalysis(series)\n\telif issubclass(series.dtype.type, np.datetime64):\n\t\tanalysis = dateAnalysis(series)\n\telse:\n\t\tanalysis = textAnalysis(series)\n\n\treturn analysis","sub_path":"dcs/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":4888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"391237565","text":"'''\nPrint all numbers from 1-150\n'''\nfor x in range(151):\n print(x)\n'''\nprint all the multiples of 5 from 5-1000\n'''\nfor x in range(5, 1001, 5):\n print(x)\n'''\nPrint integers 1 to 100. If divisible by 5,\nprint \"Coding\" instead. 
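# --- added sketch: genericAnalysis()'s mode bookkeeping via value_counts ---
# The hand-rolled frequency walk in analyze.py can be cross-checked against
# pandas directly: value_counts sorts by count descending and drops NaNs,
# matching the record's behaviour (toy series):
import pandas as pd

s = pd.Series(["a", "b", "a", None, "a", "b"])
counts = s.value_counts()
mode_vals = counts[counts == counts.iloc[0]].index.tolist()
assert mode_vals == ["a"] and int(counts.iloc[0]) == 3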
If divisible by 10,\nprint \"Coding Dojo\"\n'''\nfor x in range(0, 101):\n if x % 10 == 0:\n print(\"Coding Dojo\")\n elif x % 5 == 0:\n print(\"Coding\")\n else:\n print(x)\nsum = 0\n'''\nAdds the odds from 0 - 500,000\n'''\nfor x in range(0, 500000):\n if x % 2 != 0:\n sum += x\nprint(sum)\n'''\nPrint positive numbers starting at 2018, counting down by fours.\n'''\nfor x in range(2018,0, -4):\n print(x)\n'''\nSet three variables: lowNum, highNum, mult. Starting at lowNum and going through highNum, print only the integers that are a multiple of mult. For example, if lowNum=2, highNum=9, and mult=3, the loop should print 3, 6, 9 (on successive lines)\n'''\nlowNum = 2\nhighNum = 9\nmult = 3\nfor x in range(lowNum, highNum+1):\n if x % mult == 0:\n print(x)","sub_path":"Dojo_Assignments/python_/fundamentals/fundamentals/for_loop_basics_I/for_loop_basics_I.py","file_name":"for_loop_basics_I.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"6248021","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 17 10:44:59 2019\n\n@author: aristizabal\n\"\"\"\n\n#%% User input\n\n# lat and lon bounds\nlon_lim = [-110.0,-10.0]\nlat_lim = [15.0,45.0]\n\n# urls \nurl_glider = 'https://data.ioos.us/gliders/erddap'\nurl_GOFS = 'http://tds.hycom.org/thredds/dodsC/GLBv0.08/expt_93.0/ts3z'\nurl_RTOFS = 'http://nomads.ncep.noaa.gov:9090/dods/rtofs/rtofs_global'\n\n# Bathymetry file\nbath_file = '/Users/aristizabal/Desktop/MARACOOS_project/Maria_scripts/nc_files/GEBCO_2014_2D_-100.0_0.0_-10.0_70.0.nc'\n\n#%%\n\nfrom erddapy import ERDDAP\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport cmocean\n\nfrom datetime import datetime, timedelta\n#from matplotlib.dates import date2num\n\nimport numpy as np\nimport xarray as xr\nimport netCDF4\n\n#%% Get time bounds for the previous day\n\nte = datetime.today() \ntend = datetime(te.year,te.month,te.day)\n\nti = datetime.today() - timedelta(1)\ntini = datetime(ti.year,ti.month,ti.day)\n\n#%% Look for datasets in IOOS glider dac\n \nprint('Looking for glider data sets')\ne = ERDDAP(server = url_glider)\n\n# Grab every dataset available\ndatasets = pd.read_csv(e.get_search_url(response='csv', search_for='all'))\n\n# Search constraints\nkw = {\n 'min_lon': lon_lim[0],\n 'max_lon': lon_lim[1],\n 'min_lat': lat_lim[0],\n 'max_lat': lat_lim[1],\n 'min_time': tini.strftime('%Y-%m-%dT%H:%M:%SZ'),\n 'max_time': tend.strftime('%Y-%m-%dT%H:%M:%SZ'),\n}\n\nsearch_url = e.get_search_url(response='csv', **kw)\n#print(search_url)\n\n# Grab the results\nsearch = pd.read_csv(search_url)\n\n# Extract the IDs\ngliders = search['Dataset ID'].values\n\nmsg = 'Found {} Glider Datasets:\\n\\n{}'.format\nprint(msg(len(gliders), '\\n'.join(gliders)))\n \n# Setting constraints\nconstraints = {\n 'time>=': tini,\n 'time<=': tend,\n 'latitude>=': lat_lim[0],\n 'latitude<=': lat_lim[1],\n 'longitude>=': lon_lim[0],\n 'longitude<=': lon_lim[1],\n }\n\nvariables = [\n 'depth',\n 'latitude',\n 'longitude',\n 'time',\n 'temperature',\n 'salinity'\n ]\n\ne = ERDDAP(\n server=url_glider,\n protocol='tabledap',\n response='nc'\n )\n\n#%% Read GOFS 3.1 output\n\nprint('Retrieving coordinates from model')\nGOFS31 = xr.open_dataset(url_GOFS,decode_times=False)\n \nlatm = GOFS31.lat[:]\nlonm = GOFS31.lon[:]\ndepthm = GOFS31.depth[:]\nttm = GOFS31.time\ntm = netCDF4.num2date(ttm[:],ttm.units) \n\n#tmin = 
datetime.datetime.strptime(date_ini,'%Y-%m-%dT%H:%M:%SZ')\n#tmax = datetime.datetime.strptime(date_end,'%Y-%m-%dT%H:%M:%SZ')\n\ntmin = tini\ntmax = tend\n\noktimem = np.where(np.logical_and(tm >= tmin, tm <= tmax))\n \ntimem = tm[oktimem]\n\n#%% Reading bathymetry data\n\nncbath = xr.open_dataset(bath_file)\nbath_lat = ncbath.variables['lat'][:]\nbath_lon = ncbath.variables['lon'][:]\nbath_elev = ncbath.variables['elevation'][:]\n\n'''\noklatbath = np.logical_and(bath_lat >= lat_lim[0],bath_lat <= lat_lim[-1])\noklonbath = np.logical_and(bath_lon >= lon_lim[0],bath_lon <= lon_lim[-1])\n\nbath_latsub = bath_lat[oklatbath]\nbath_lonsub = bath_lon[oklonbath]\nbath_elevs = bath_elev[oklatbath,:]\nbath_elevsub = bath_elevs[:,oklonbath] \n''' \n \n#%% \n\nfor id in gliders:\n print('Reading ' + id )\n e.dataset_id = id\n e.constraints = constraints\n e.variables = variables\n \n # Converting glider data to data frame\n df = e.to_pandas(\n index_col='time (UTC)',\n parse_dates=True,\n skiprows=(1,) # units information can be dropped.\n ).dropna()\n \n # Coverting glider vectors into arrays\n timeg, ind = np.unique(df.index.values,return_index=True)\n latg = np.unique(df['latitude (degrees_north)'].values)\n long = np.unique(df['longitude (degrees_east)'].values)\n\n dg = df['depth (m)'].values\n #vg = df['temperature (degree_Celsius)'].values\n vg = df[df.columns[3]].values\n \n delta_z = 0.3\n zn = np.int(np.round(np.max(dg)/delta_z))\n\n depthg = np.empty((zn,len(timeg)))\n depthg[:] = np.nan\n varg = np.empty((zn,len(timeg)))\n varg[:] = np.nan\n \n # Grid variables\n depthg_gridded = np.arange(0,np.nanmax(dg),delta_z)\n varg_gridded = np.empty((len(depthg_gridded),len(timeg)))\n varg_gridded[:] = np.nan\n \n for i,ii in enumerate(ind):\n if i < len(timeg)-1:\n depthg[0:len(dg[ind[i]:ind[i+1]]),i] = dg[ind[i]:ind[i+1]] \n varg[0:len(vg[ind[i]:ind[i+1]]),i] = vg[ind[i]:ind[i+1]]\n else:\n depthg[0:len(dg[ind[i]:len(dg)]),i] = dg[ind[i]:len(dg)] \n varg[0:len(vg[ind[i]:len(vg)]),i] = vg[ind[i]:len(vg)]\n\n for t,tt in enumerate(timeg):\n depthu,oku = np.unique(depthg[:,t],return_index=True)\n varu = varg[oku,t]\n okdd = np.isfinite(depthu)\n depthf = depthu[okdd]\n varf = varu[okdd]\n ok = np.isfinite(varf)\n if np.sum(ok) < 3:\n varg_gridded[:,t] = np.nan\n else:\n okd = depthg_gridded < np.max(depthf[ok])\n varg_gridded[okd,t] = np.interp(depthg_gridded[okd],depthf[ok],varf[ok]) \n \n # Conversion from glider longitude and latitude to GOFS convention\n target_lon = np.empty((len(long),))\n target_lon[:] = np.nan\n for i,ii in enumerate(long):\n if ii < 0: \n target_lon[i] = 360 + ii\n else:\n target_lon[i] = ii\n target_lat = latg\n\n # Changing times to timestamp\n tstamp_glider = [mdates.date2num(timeg[i]) for i in np.arange(len(timeg))]\n tstamp_model = [mdates.date2num(timem[i]) for i in np.arange(len(timem))]\n\n # interpolating glider lon and lat to lat and lon on model time\n sublonm=np.interp(tstamp_model,tstamp_glider,target_lon)\n sublatm=np.interp(tstamp_model,tstamp_glider,target_lat)\n\n # getting the model grid positions for sublonm and sublatm\n oklonm=np.round(np.interp(sublonm,lonm,np.arange(len(lonm)))).astype(int)\n oklatm=np.round(np.interp(sublatm,latm,np.arange(len(latm)))).astype(int)\n \n # Getting glider transect from GOFS 3.1\n print('Getting glider transect from GOFS 3.1. 
If it breaks is because\\\n GOFS 3.1 server is not responding')\n target_varm = np.empty((len(depthm),len(oktimem[0])))\n target_varm[:] = np.nan\n for i in range(len(oktimem[0])):\n print(len(oktimem[0]),' ',i)\n target_varm[:,i] = GOFS31.variables['water_temp'][oktimem[0][i],:,oklatm[i],oklonm[i]]\n\n target_varm[target_varm < -100] = np.nan\n \n min_val = np.round(np.min([np.nanmin(df[df.columns[3]]),np.nanmin(target_varm)]))\n max_val = np.round(np.max([np.nanmax(df[df.columns[3]]),np.nanmax(target_varm)]))\n\n # plot\n fig, ax = plt.subplots(figsize=(12, 10))\n folder = '/Users/aristizabal/Desktop/MARACOOS_project/Maria_scripts/Figures/Model_glider_comp/'\n \n grid = plt.GridSpec(3, 5, wspace=0.4, hspace=0.3)\n\n # Scatter plot\n ax = plt.subplot(grid[0, :4])\n kw = dict(s=30, c=df[df.columns[3]], marker='*', edgecolor='none')\n cs = ax.scatter(df.index, -df['depth (m)'], **kw, cmap=cmocean.cm.thermal)\n cs.set_clim(min_val,max_val) \n ax.set_xlim(df.index[0], df.index[-1])\n xfmt = mdates.DateFormatter('%H:%Mh\\n%d-%b')\n ax.xaxis.set_major_formatter(xfmt)\n ax.set_xticklabels(' ') \n cbar = fig.colorbar(cs, orientation='vertical')\n cbar.ax.set_ylabel('($^oC$)',fontsize=14,labelpad=15)\n ax.set_ylabel('Depth (m)',fontsize=14)\n plt.title('Along Track Temperature ' + id)\n \n \n nlevels = max_val - min_val + 1\n kw = dict(levels = np.linspace(min_val,max_val,nlevels))\n ax = plt.subplot(grid[1, :4])\n #plt.contour(timeg,-depthg_gridded,varg_gridded,colors = 'lightgrey',**kw)\n cs = plt.contourf(timeg,-depthg_gridded,varg_gridded,cmap=cmocean.cm.thermal,**kw)\n if np.logical_and(min_val<=26.0,max_val>=26.0): \n plt.contour(timeg,-depthg_gridded,varg_gridded,levels=[26],colors='k')\n\n cs = fig.colorbar(cs, orientation='vertical') \n cs.ax.set_ylabel('($^oC$)',fontsize=14,labelpad=15)\n \n ax.set_xlim(df.index[0], df.index[-1])\n ax.set_ylabel('Depth (m)',fontsize=14)\n ax.set_xticklabels(' ')\n plt.title('Along Track Temperature ' + id)\n \n \n ax = plt.subplot(grid[2, :4]) \n #plt.contour(timeg,-depthg_gridded,varg_gridded,colors = 'lightgrey',**kw)\n cs = plt.contourf(timem,-depthm,target_varm,cmap=cmocean.cm.thermal,**kw)\n if np.logical_and(min_val<=26.0,max_val>=26.0): \n plt.contour(timem,-depthm,target_varm,[26],colors='k')\n\n cs = fig.colorbar(cs, orientation='vertical') \n cs.ax.set_ylabel('($^oC$)',fontsize=14,labelpad=15)\n\n ax.set_xlim(df.index[0], df.index[-1])\n ax.set_ylim(-np.max(df['depth (m)']), 0)\n ax.set_ylabel('Depth (m)',fontsize=14)\n xfmt = mdates.DateFormatter('%H:%Mh\\n%d-%b')\n ax.xaxis.set_major_formatter(xfmt)\n plt.title('Along Track Temperature GOFS 3.1')\n \n oklatbath = np.logical_and(bath_lat >= np.min(latg)-5,bath_lat <= np.max(latg)+5)\n oklonbath = np.logical_and(bath_lon >= np.min(long)-5,bath_lon <= np.max(long)+5)\n\n bath_latsub = bath_lat[oklatbath]\n bath_lonsub = bath_lon[oklonbath]\n bath_elevs = bath_elev[oklatbath,:]\n bath_elevsub = bath_elevs[:,oklonbath] \n \n ax = plt.subplot(grid[1, 4:])\n plt.contour(bath_lonsub,bath_latsub,bath_elevsub,[0],colors='k')\n plt.contourf(bath_lonsub,bath_latsub,bath_elevsub,cmap='Blues_r')\n plt.contourf(bath_lonsub,bath_latsub,bath_elevsub,[0,10000],colors='seashell')\n #plt.yticks([])\n #plt.xticks([])\n plt.axis([np.min(long)-5,np.max(long)+5,np.min(latg)-5,np.max(latg)+5])\n plt.plot(long,latg,'.k')\n plt.title('Track ' + id)\n #plt.axis('equal')\n \n folder = '/Users/aristizabal/Desktop/MARACOOS_project/Maria_scripts/Figures/Model_glider_comp/'\n #folder = 
'/Volumes/hurricane/Hurricane_season_2019/' + ti.strftime('%b-%d') + '/'\n file = folder + 'along_track_temp_' + id + '_' + str(tini).split()[0] + '_' + str(tend).split()[0]\n plt.savefig(file,bbox_inches = 'tight',pad_inches = 0.1) \n \n \n#plt.show()\n","sub_path":"Daily_glider_transects_vs_GOFS31.py","file_name":"Daily_glider_transects_vs_GOFS31.py","file_ext":"py","file_size_in_byte":10089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"442873979","text":"import boto3\nimport datetime\nimport os\nimport json\nimport pprint\n\nif os.path.exists(\"somefile.txt\"):\n os.remove(\"somefile.txt\")\n\nprint(datetime.datetime.now())\nec2resource = boto3.resource('ec2')\nec2client = boto3.client('ec2')\n#This get_name get instance id and rerutn name\ndef get_name(fid):\n ec2instance = ec2resource.Instance(fid)\n instancename = ''\n for tags in ec2instance.tags:\n if tags[\"Key\"] == 'Name':\n instancename = tags[\"Value\"]\n return(instancename)\n#get_name(fid='i-027a13c48bbd73fa3')\n\n# Check events :\nresponse = ec2client.describe_instance_status()\nfor row in response['InstanceStatuses']:\n #print(r['InstanceId'])\n try:\n value = row['Events']\n except KeyError:\n # Key is not present\n print(\"No event for \" + row['InstanceId'] + \" server_name=\" + get_name(fid=row['InstanceId']))\n json_data=row\n x=json.dumps(json_data, sort_keys = True, ensure_ascii=False)\n pprint.pprint(x)\n #print(row)\n with open('somefile.txt', 'a') as f:\n f.write(x)\n f.write(\"\\n\")\n else:\n print(\"There is event for \" + r['InstanceId'] + \" server_name=\" + get_name(fid=row['InstanceId']) + \"Please Check!\")\n\n'''\nNo event for i-0e77545e5a65a2182 server_name=Percona\n('{\"AvailabilityZone\": \"us-east-1c\", \"InstanceId\": \"i-0e77545e5a65a2182\", '\n '\"InstanceState\": {\"Code\": 16, \"Name\": \"running\"}, \"InstanceStatus\": '\n '{\"Details\": [{\"Name\": \"reachability\", \"Status\": \"passed\"}], \"Status\": \"ok\"}, '\n '\"SystemStatus\": {\"Details\": [{\"Name\": \"reachability\", \"Status\": \"passed\"}], '\n '\"Status\": \"ok\"}}')\n\n'''\n\n\n","sub_path":"aws_events7.py","file_name":"aws_events7.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"255724050","text":"import os\nimport numpy as np\nimport cv2\nimport tifffile\nimport math\nimport heapq\nimport copy\nsigma_set = 4\nn_set = 1\n\n\ndef maker_to_points(path):\n points = []\n with open(path) as f:\n lines = f.readlines()\n for line in lines:\n if line == '\\n' or '#' in line:\n continue\n else:\n ss = line.split(',')\n point_3D = [int(float(ss[2])-1), int(float(ss[1])-1), int(float(ss[0])-1)]\n points.append(point_3D)\n f.close()\n return points\n\n\ndef create_ideal(sigma, m_path,n=1):\n sigma1 = int(n * sigma)\n points = maker_to_points(m_path)\n\n image = np.zeros((64,64,64))\n for point in points:\n for x in range(-sigma1, sigma1 + 1):\n for y in range(-sigma1, sigma1 + 1):\n for z in range(-int(sigma1/3), int(sigma1/3) + 1):\n if x * x + y * y + int(sigma1/3)*int(sigma1/3)*z * z <= sigma1 * sigma1:\n if 0 <= point[2] + x < image.shape[2] and 0 <= point[0] + z < image.shape[0] and 0 <= point[1] + y < image.shape[1]:\n image[point[0] + z][point[1] + y][point[2] + x] = int(65535 / (1 * ((2 * np.pi) ** 0.5)) * math.exp(-(x * x + y * y + 4*z * z) / (2*(1**2))))\n\n return image\n\n\npath_ms = r'F:\\NuMorph\\3DNucleiTracingData\\cut_Training\\manual_training'\npath_is = 
r'F:\\NuMorph\\3DNucleiTracingData\\cut_Training\\ideal_ims_semi'\nfor root, dirs, files in os.walk(path_ms):\n for f in files:\n path_m = root + '/' + f\n path_i = path_is + '/' + f + '.tif'\n image1 = create_ideal(sigma_set,path_m,n=n_set)\n image1 = image1.astype(np.uint16)\n tifffile.imwrite(path_i, image1)\n\n\n","sub_path":"hackathon/gyy/nucleus_detection/creatdata1.py","file_name":"creatdata1.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"343750672","text":"# encoding: utf-8\n\n__version__ = '1.0'\n\n\"\"\"\nPersian\nSimple tool for Persian language localization in Python\nCopyright (C) 2017 Mohammad Reza Kamalifard (mrkamalifard@gmail.com)\n@rezakamalifard\nMIT licensed\nhttps://github.com/itmard/Persian\n\"\"\"\n\nimport re\n\n\n_en_to_fa_numbers_mapping = {\n '0': '۰',\n '1': '۱',\n '2': '۲',\n '3': '۳',\n '4': '۴',\n '5': '۵',\n '6': '۶',\n '7': '۷',\n '8': '۸',\n '9': '۹',\n '.': '.',\n}\n\n_en_to_fa_characters_mapping = {\n 'q': 'ض',\n 'w': 'ص',\n 'e': 'ث',\n 'r': 'ق',\n 't': 'ف',\n 'y': 'غ',\n 'u': 'ع',\n 'i': 'ه',\n 'o': 'خ',\n 'p': 'ح',\n '[': 'ج',\n ']': 'چ',\n 'a': 'ش',\n 's': 'س',\n 'd': 'ی',\n 'f': 'ب',\n 'g': 'ل',\n 'h': 'ا',\n 'j': 'ت',\n 'k': 'ن',\n 'l': 'م',\n ';': 'ک',\n \"'\": '��',\n 'z': 'ظ',\n 'x': 'ط',\n 'c': 'ز',\n 'v': 'ر',\n 'b': 'ذ',\n 'n': 'د',\n 'm': 'پ',\n ',': 'و',\n '?': '؟',\n}\n\n_ar_to_fa_numbers_mapping = {\n '١': '۱', # Arabic 1 is 0x661 and Persian one is 0x6f1\n '٢': '۲', # More info https://goo.gl/SPiBtn\n '٣': '۳',\n '٤': '۴',\n '٥': '۵',\n '٦': '۶',\n '٧': '۷',\n '٨': '۸',\n '٩': '۹',\n '٠': '۰',\n}\n\n_ar_to_fa_characters_mapping = {\n 'ك': 'ک',\n 'دِ': 'د',\n 'بِ': 'ب',\n 'زِ': 'ز',\n 'ذِ': 'ذ',\n 'شِ': 'ش',\n 'سِ': 'س',\n 'ى': 'ی',\n 'ي': 'ی'\n}\n\n_en_to_fa_weekday = {\n 'Sat': 'شنبه',\n 'Sun': 'یکشنبه',\n 'Mon': 'دوشنبه',\n 'Tue': 'سه‌شنبه',\n 'Wed': 'چهارشنبه',\n 'Thu': 'پنجشنبه',\n 'Fri': 'جمعه'\n}\n\n_en_to_fa_short_month = {\n 'Far': 'فروردین',\n 'Ord': 'اردیبهشت',\n 'Kho': 'خرداد',\n 'Tir': 'تیر',\n 'Mor': 'مرداد',\n 'Sha': 'شهریور',\n 'Meh': 'مهر',\n 'Aba': 'آبان',\n 'Aza': 'آذر',\n 'Bah': 'بهمن',\n 'Dey': 'دی',\n 'Esf': 'اسفند'\n}\n\n_en_to_fa_month = {\n 'Farvardin': 'فروردین',\n 'Ordibehesht': 'اردیبهشت',\n 'Khordad': 'خرداد',\n 'Tir': 'تیر',\n 'Mordad': 'مرداد',\n 'Shahrivar': 'شهریور',\n 'Mehr': 'مهر',\n 'Aban': 'آبان',\n 'Azar': 'آذر',\n 'Bahman': 'بهمن',\n 'Dey': 'دی',\n 'Esfand': 'اسفند'\n}\n\n\ndef en_to_fa_numbers(input_str):\n \"\"\"\n Converts English numbers to Persian numbers\n :param input_str: String contains English numbers\n :return: New string with Persian numbers\n \"\"\"\n mapping = _en_to_fa_numbers_mapping\n return _multiple_replace(mapping, input_str)\n\n\ndef en_to_fa_characters(input_str):\n \"\"\"\n Assumes that characters written with standard ferdowsi keyboard\n not windows arabic layout\n :param input_str: String contains English chars\n :return: New string with related characters on Persian standard keyboard\n \"\"\"\n mapping = _en_to_fa_characters_mapping\n return _multiple_replace(mapping, input_str)\n\n\ndef ar_to_fa_numbers(input_str):\n \"\"\"\n Converts Arabic numbers to Persian numbers\n :param input_str: String contains Arabic numbers\n :return: New str and replaces arabic number with ferdowsi numbers\n \"\"\"\n mapping = _ar_to_fa_numbers_mapping\n return _multiple_replace(mapping, input_str)\n\n\ndef ar_to_fa_characters(input_str):\n \"\"\"\n Converts Arabic chars to related Persian 
unicode char\n :param input_str: String contains Arabic chars\n :return: New str with converted arabic chars\n \"\"\"\n mapping = _ar_to_fa_characters_mapping\n return _multiple_replace(mapping, input_str)\n\n\ndef fa_to_en_numbers(input_str):\n \"\"\"\n Converts Persian numbers to English numbers.\n\n Keyword arguments:\n :param input_str: It should be string\n\n :returns: English numbers\n \"\"\"\n mapping = _reverse_mapping(_en_to_fa_numbers_mapping)\n return _multiple_replace(mapping, input_str)\n\n\ndef fa_to_en_characters(input_str):\n \"\"\"\n Converts Persian characters to English characters.\n\n Keyword arguments:\n :param input_str: String contains Farsi chars\n\n :returns: New str with converted Farsi chars\n \"\"\"\n mapping = _reverse_mapping(_en_to_fa_characters_mapping)\n return _multiple_replace(mapping, input_str)\n\n\ndef fa_to_ar_numbers(input_str):\n mapping = _reverse_mapping(_ar_to_fa_numbers_mapping)\n return _multiple_replace(mapping, input_str)\n\n\n# def fa_to_ar_characters(input_str):\n# mapping = _reverse_mapping(_ar_to_fa_characters_mapping)\n# return _multiple_replace(mapping, input_str)\n\n\ndef en_to_fa_weekday(input_str):\n mapping = _en_to_fa_weekday\n return _multiple_replace(mapping, input_str)\n\n\ndef fa_to_en_weekday(input_str):\n mapping = _reverse_mapping(_en_to_fa_weekday)\n return _multiple_replace(mapping, input_str)\n\n\ndef en_to_fa_month(input_str):\n mapping = _en_to_fa_month\n return _multiple_replace(mapping, input_str)\n\n\ndef fa_to_en_month(input_str):\n mapping = _reverse_mapping(_en_to_fa_month)\n return _multiple_replace(mapping, input_str)\n\n\ndef en_to_fa_short_month(input_str):\n mapping = _en_to_fa_short_month\n return _multiple_replace(mapping, input_str)\n\n\ndef fa_to_en_short_month(input_str):\n mapping = _reverse_mapping(_en_to_fa_short_month)\n return _multiple_replace(mapping, input_str)\n\n\ndef _multiple_replace(mapping, text):\n \"\"\"\n Internal function for replace all mapping keys for a input string\n :param mapping: replacing mapping keys\n :param text: user input string\n :return: New string with converted mapping keys to values\n \"\"\"\n pattern = \"|\".join(map(re.escape, mapping.keys()))\n return re.sub(pattern, lambda m: mapping[m.group()], str(text))\n\n\ndef _reverse_mapping(mapping):\n return {value: key for key, value in mapping.items()}\n","sub_path":"ferdowsi/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"293446537","text":"import os.path as osp\r\nimport glob\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nimport torch\r\nfrom srrescgan.models.SRResCGAN import Generator\r\nfrom srrescgan.utils import timer\r\nfrom collections import OrderedDict\r\nimport time\r\ntorch.cuda.empty_cache()\r\n\r\n\r\ndef _overlap_crop_forward(x, model, min_size=100000):\r\n \"\"\"\r\n chop for less memory consumption during test\r\n \"\"\"\r\n n_GPUs = 1\r\n scale = 4\r\n b, c, h, w = x.size()\r\n h_e, w_e = h // 4, w // 4\r\n lr_list = [\r\n x[:, :, 0:h_e, 0:w_e],\r\n x[:, :, 0:h_e, w_e:w_e * 2],\r\n x[:, :, 0:h_e, w_e * 2:w_e * 3],\r\n x[:, :, 0:h_e, w_e * 3:w],\r\n\r\n x[:, :, h_e:h_e * 2, 0:w_e],\r\n x[:, :, h_e:h_e * 2, w_e:w_e * 2],\r\n x[:, :, h_e:h_e * 2, w_e * 2:w_e * 3],\r\n x[:, :, h_e:h_e * 2, w_e * 3:w],\r\n\r\n x[:, :, h_e * 2:h_e * 3, 0:w_e],\r\n x[:, :, h_e * 2:h_e * 3, w_e:w_e * 2],\r\n x[:, :, h_e * 2:h_e * 3, w_e * 2:w_e * 3],\r\n x[:, :, h_e * 2:h_e * 3, w_e * 
3:w],\r\n\r\n x[:, :, h_e * 3:h, 0:w_e],\r\n x[:, :, h_e * 3:h, w_e:w_e * 2],\r\n x[:, :, h_e * 3:h, w_e * 2:w_e * 3],\r\n x[:, :, h_e * 3:h, w_e * 3:w]\r\n ]\r\n\r\n h, w = scale * h, scale * w\r\n h_f = scale * h_e\r\n w_f = scale * w_e\r\n output = torch.zeros([b, c, h, w])\r\n\r\n if w_e * h_e < min_size:\r\n for i in range(0, 16, n_GPUs):\r\n lr_batch = torch.cat(lr_list[i:(i + n_GPUs)], dim=0)\r\n sr_batch_temp = model(lr_batch)\r\n\r\n if isinstance(sr_batch_temp, list):\r\n sr_batch = sr_batch_temp[-1]\r\n else:\r\n sr_batch = sr_batch_temp\r\n\r\n if i == 0:\r\n output[:, :, 0:h_f, 0:w_f] = sr_batch\r\n elif i == 1:\r\n output[:, :, 0:h_f, w_f:w_f * 2] = sr_batch\r\n elif i == 2:\r\n output[:, :, 0:h_f, w_f * 2:w_f * 3] = sr_batch\r\n elif i == 3:\r\n output[:, :, 0:h_f, w_f * 3:w] = sr_batch\r\n elif i == 4:\r\n output[:, :, h_f:h_f * 2, 0:w_f] = sr_batch\r\n elif i == 5:\r\n output[:, :, h_f:h_f * 2, w_f:w_f * 2] = sr_batch\r\n elif i == 6:\r\n output[:, :, h_f:h_f * 2, w_f * 2:w_f * 3] = sr_batch\r\n elif i == 7:\r\n output[:, :, h_f:h_f * 2, w_f * 3:w] = sr_batch\r\n elif i == 8:\r\n output[:, :, h_f * 2:h_f * 3, 0:w_f] = sr_batch\r\n elif i == 9:\r\n output[:, :, h_f * 2:h_f * 3, w_f:w_f * 2] = sr_batch\r\n elif i == 10:\r\n output[:, :, h_f * 2:h_f * 3, w_f * 2:w_f * 3] = sr_batch\r\n elif i == 11:\r\n output[:, :, h_f * 2:h_f * 3, w_f * 3:w] = sr_batch\r\n elif i == 12:\r\n output[:, :, h_f * 3:h, 0:w_f] = sr_batch\r\n elif i == 13:\r\n output[:, :, h_f * 3:h, w_f:w_f * 2] = sr_batch\r\n elif i == 14:\r\n output[:, :, h_f * 3:h, w_f * 2:w_f * 3] = sr_batch\r\n elif i == 15:\r\n output[:, :, h_f * 3:h, w_f * 3:w] = sr_batch\r\n\r\n del sr_batch\r\n torch.cuda.empty_cache()\r\n\r\n return output\r\n\r\n\r\ndef main_srrescgan():\r\n model_path = 'srrescgan/trained_nets_x4/srrescgan_model.pth' # trained G srfbn of SRResCGAN\r\n device = torch.device('cuda') # if you want to run on CPU, change 'cuda' -> cpu\r\n # device = torch.device('cpu')\r\n\r\n test_img_folder = 'static/uploads/*' # testset LR images path\r\n\r\n model = Generator(scale=4) # SRResCGAN generator net\r\n model.load_state_dict(torch.load(model_path), strict=True)\r\n model.eval()\r\n model = model.to(device)\r\n\r\n print('Model path {:s}. 
\\nTesting...'.format(model_path))\r\n\r\n test_results = OrderedDict()\r\n test_results['time'] = []\r\n idx = 0\r\n\r\n for path_lr in glob.glob(test_img_folder):\r\n idx += 1\r\n base = osp.splitext(osp.basename(path_lr))[0]\r\n print('Img:', idx, base)\r\n\r\n # read images: LR\r\n img_lr = cv2.imread(path_lr, cv2.IMREAD_COLOR)\r\n img_LR = torch.from_numpy(np.transpose(img_lr[:, :, [2, 1, 0]], (2, 0, 1))).float()\r\n img_LR = img_LR.unsqueeze(0)\r\n img_LR = img_LR.to(device)\r\n\r\n # testing\r\n t = timer()\r\n t.tic()\r\n\r\n with torch.no_grad():\r\n output_SR = _overlap_crop_forward(img_LR, model)\r\n end_time = t.toc()\r\n\r\n output_sr = output_SR.data.squeeze().float().cpu().clamp_(0, 255).numpy()\r\n output_sr = np.transpose(np.squeeze(output_sr), (1, 2, 0))\r\n\r\n test_results['time'].append(end_time)\r\n print('{:->4d}--> {:>10s}, time: {:.4f} sec.'.format(idx, base, end_time))\r\n\r\n # # save images\r\n save_img_path = os.path.join('./static/downloads')\r\n\r\n if not os.path.exists(save_img_path): os.makedirs(save_img_path)\r\n cv2.imwrite(os.path.join(save_img_path, 'SRResCGAN_'+path_lr.split('/')[-1]), cv2.cvtColor(output_sr, cv2.COLOR_RGB2BGR))\r\n del img_LR, img_lr\r\n del output_SR, output_sr\r\n\r\n avg_time = sum(test_results['time']) / len(test_results['time'])\r\n print('Avg. Time:{:.4f}'.format(avg_time))\r\n","sub_path":"srrescgan/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":5170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"506222805","text":"#Using Range and Len Functions\r\n\r\n#[] - Add friends name until done\r\n#[] - Print friends name\r\n#[] -\r\n\r\ndef friends_list(name):\r\n count = 0\r\n friends = []\r\n\r\n while True:\r\n friends.append(name)\r\n\r\n if name == 'done':\r\n for friend in range(friends):\r\n print(friends[friend], \"\")\r\n break\r\n\r\n\r\nfName = input('Enter list of names. If done, type done. 
')\r\nlowerfn = fName.lower()\r\n\r\nwhile True:\r\n friends_list(lowerfn)\r\n","sub_path":"RangeLengthFunc.py","file_name":"RangeLengthFunc.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"247612909","text":"import requests\nimport re #导入所需模块\nimport os\nimport time\n\n#定义全局变量 文件名的 编号 (初始值为 1 )\nglobal file_name_num\nfile_name_num = 1\n\n\n#下载文件的方法\ndef download_file(url,folderpath,num,file_name): #参数中 folderpath 为要保存的文件夹的路径 num 为文件编号\n #文件夹不存在则创建文件夹\n folder = os.path.exists(folderpath)\n if not folder:\n os.makedirs(folderpath)\n\n print('---正在下载第' + str(num) + '集>>>>>', folderpath+'/'+file_name+'-'+str(num)+'.mp3')\n\n #读取远程MP3资源\n res = requests.get(url)\n res.raise_for_status()\n\n #设置保存的文件名\n#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n #修改文件名的地方\n filename = os.path.basename(file_name+'-'+str(num)+'.mp3')\n file_path = os.path.join(folderpath, filename)\n\n\n #保存到本地\n mp3_file = open(file_path, 'wb')\n for chunk in res.iter_content(100000):\n mp3_file.write(chunk)\n mp3_file.close()\n print(' ***第'+str(num)+'集下载成功')\n\n #修改文件编号 加 1\n global file_name_num\n file_name_num+=1\n\n\n#获取下载链接的方法\ndef getinfo(url0):\n #请求头\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36\"\n }\n #请求网页数据\n req = requests.get(url=url0, headers=headers)\n req.encoding = 'utf-8'\n html = req.text\n\n #查找下载链接\n res = re.findall(r\"http://(.*).m4a\", html)\n\n #拼合链接\n res = 'http://' + res[0] + '.m4a'\n return res\n\ndef onclick(file_name,store_path,wangzhi,begin0,end0):\n #网页路径\n\n begin = int(begin0)\n end = int(end0)\n\n global file_name_num\n file_name_num = begin\n#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n #修改要下载的小说的网址 以及下载的集数\n wangzhi_1 = wangzhi\n strinfo = re.compile('-(\\d|\\d\\d|\\d\\d\\d|\\d\\d\\d\\d).shtml')\n wangzhi_2 = strinfo.sub('-{}.shtml', wangzhi_1)\n\n urls = [wangzhi_2.format(str(i)) for i in range(begin, end+1)]\n #'https://www.qktsw.net/ting-book-play-3483-1-{}.shtml'\n #https://www.qktsw.net/\n\n print(\"############开始下载############\")\n for url in urls:\n#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n #设置要保存到的路路径\n folderpath = store_path\n #通过函数返回下载链接\n url_download = getinfo(url)\n #开始下载\n\n download_file(url_download, folderpath, file_name_num, file_name)\n\n time.sleep(1)\n\n print(\"############全部下载完成############\")\n\n","sub_path":"next.py","file_name":"next.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"318409125","text":"_base_ = [\n '../_base_/models/cyclegan_lsgan_resnet.py',\n '../_base_/datasets/unpaired_imgs_256x256.py',\n '../_base_/default_runtime.py'\n]\ntrain_cfg = dict(buffer_size=50)\ntest_cfg = None\n\ndomain_a = 'photo'\ndomain_b = 'mask'\nmodel = dict(\n default_domain=domain_a,\n reachable_domains=[domain_a, domain_b],\n related_domains=[domain_a, domain_b],\n gen_auxiliary_loss=[\n dict(\n type='L1Loss',\n loss_weight=10.0,\n loss_name='cycle_loss',\n data_info=dict(\n pred=f'cycle_{domain_a}', target=f'real_{domain_a}'),\n reduction='mean'),\n dict(\n type='L1Loss',\n loss_weight=10.0,\n loss_name='cycle_loss',\n data_info=dict(\n pred=f'cycle_{domain_b}',\n target=f'real_{domain_b}',\n ),\n reduction='mean'),\n dict(\n type='L1Loss',\n loss_weight=0.5,\n loss_name='id_loss',\n data_info=dict(\n pred=f'identity_{domain_a}', target=f'real_{domain_a}'),\n 
reduction='mean'),\n dict(\n type='L1Loss',\n loss_weight=0.5,\n loss_name='id_loss',\n data_info=dict(\n pred=f'identity_{domain_b}', target=f'real_{domain_b}'),\n reduction='mean')\n ])\ndataroot = './data/unpaired_facades'\ndata = dict(\n train=dict(dataroot=dataroot),\n val=dict(dataroot=dataroot),\n test=dict(dataroot=dataroot))\n\noptimizer = dict(\n generators=dict(type='Adam', lr=0.0002, betas=(0.5, 0.999)),\n discriminators=dict(type='Adam', lr=0.0002, betas=(0.5, 0.999)))\n\n# learning policy\nlr_config = dict(\n policy='Linear', by_epoch=False, target_lr=0, start=40000, interval=400)\n\ncheckpoint_config = dict(interval=10000, save_optimizer=True, by_epoch=False)\ncustom_hooks = [\n dict(\n type='MMGenVisualizationHook',\n output_dir='training_samples',\n res_name_list=['fake_photo'],\n interval=5000)\n]\n\nrunner = None\nuse_ddp_wrapper = True\ntotal_iters = 80000\nworkflow = [('train', 1)]\nexp_name = 'cyclegan_facades'\nwork_dir = f'./work_dirs/experiments/{exp_name}'\nmetrics = dict(\n FID=dict(type='FID', num_images=106, image_shape=(3, 256, 256)),\n IS=dict(type='IS', num_images=106, image_shape=(3, 256, 256)))\n","sub_path":"configs/cyclegan/cyclegan_lsgan_resnet_in_1x1_80k_facades.py","file_name":"cyclegan_lsgan_resnet_in_1x1_80k_facades.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"555378638","text":"from tkinter import *\nimport tkinter.messagebox as MessageBox\nfrom crud import *\n\ndef interface():\n root = Tk()\n root.geometry(\"600x300\")\n root.title(\"Pytkmysql\")\n\n def insert():\n id = id_entry.get()\n name = name_entry.get()\n surname = surname_entry.get()\n\n if (id == \"\" or name == \"\" or surname ==\"\"):\n MessageBox.showinfo(\"Insert Status\",\"All attributes are required ! \")\n else :\n try:\n crud_insert(id,name,surname)\n MessageBox.showinfo(\"Database\", \"OK !!! \")\n except:\n MessageBox.showinfo(\"Database\", \"ERROR !!! \")\n\n def update():\n id = id_entry.get()\n name = name_entry.get()\n surname = surname_entry.get()\n\n if (id == \"\" or name == \"\" or surname ==\"\"):\n MessageBox.showinfo(\"Update Status\",\"All attributes are required ! \")\n else :\n try:\n crud_update(id,name,surname)\n MessageBox.showinfo(\"Database\", \"OK !!! \")\n except:\n MessageBox.showinfo(\"Database\", \"ERROR !!! 
\")\n\n\n\n id = Label(root, text = 'Enter ID : ', font = ('bold',15))\n id.place(x=20,y=30)\n\n id_entry = Entry()\n id_entry.place(x=120,y=35)\n\n name = Label(root, text = 'Name : ', font = ('bold',15))\n name.place(x=20,y=70)\n\n name_entry = Entry()\n name_entry.place(x=120,y=75)\n\n surname = Label(root, text = 'Surname : ', font = ('bold',15))\n surname.place(x=20,y=110)\n\n surname_entry = Entry()\n surname_entry.place(x=120,y=115)\n\n insert = Button(root, text= \"insert\", font = (\"italic\",10), bg = \"white\",command=insert).place(x = 20, y = 150)\n delete = Button(root, text=\"delete\", font=(\"italic\", 10), bg=\"white\").place(x=120, y=150)\n update = Button(root, text=\"update\", font=(\"italic\", 10), bg=\"white\",command=update).place(x=20, y=200)\n get = Button(root, text=\"get\", font=(\"italic\", 10), bg=\"white\").place(x=120, y=200)\n\n root.mainloop()\n","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"579961360","text":"\ndef marketing_budget():\n print(\"ADVERTISING CAMPAIGN -\" + \"\\n======================\")\n FB_cost = (int(input(\"Facebook campaign budget per day in ILS= \" )))\n print(str(FB_cost) + \"ILS\")\n long_fbcamp = int(input(\"How many days you want FACEBOOK campaign will run? \"))\n print(str(long_fbcamp) + \" days\")\n INST_cost = int(input(\"Instagram campaign budget per day in ILS = \"))\n long_inscamp = int(input(\"How long you want INSTAGRASM campaign will run ? \"))\n Total_Budget = int(input(\"Enter your total budget: \"))\n\n sum_camp = ((FB_cost*long_fbcamp) + (INST_cost*long_inscamp))\n print(\"The total cost of the campaign is : \" + str(sum_camp) + \"ILS\" + \"\\nPLUS TAX: \" + str(sum_camp*1.17))\n\n\n if (sum_camp) > (Total_Budget):\n print(\"Add a : \" + str(sum_camp - Total_Budget) + \"ILS to your budget\\n\\n\\n\")\n else:\n print(\"succesfull\\n\\n\\n\")\n\n\nmarketing_budget()\n\nfrom time import sleep\nfrom random import randint\n\ndef lottery_game():\n guess_no = [2,5,6,11,12,31]\n print( \" YOUR GUESSING NUMBERS IS :\" + str(guess_no))\n game_cost = int(input(\"LOTTERY GAMES ROW COST IN ILS:\"))\n lottery_budget = int(input(\"HOW MUCH MONEY DO YOU HAVE ? 
\"))\n num_of_rows = lottery_budget//game_cost\n sleep (2)\n print(\"YOU HAVE : \" + str(num_of_rows) + \" ROWS TO PLAY\")\n sleep (2)\n price = 0\n\n for i in range(num_of_rows):\n print(\"GAME NUMBER: \" + str(i+1) + \"\\n-------------\\n\")\n sleep (2)\n win_num = sorted([randint(1, 37) for x in range(6)])\n print(\"THE WINNING NUMBERS ARE : \" + str(win_num))\n check_num = 0\n for i in win_num:\n if i in guess_no:\n check_num += 1\n if check_num == 6:\n price = price + 1\n print(\"YOU WON \" + str(price) + \"M ILS\")\n elif check_num == 5:\n price = price + 5000\n print(\"YOU WON \" + str(price) + \" ILS\")\n elif check_num == 4:\n price = price + 100\n print(\"YOU WON \" + str(price) + \" ILS\")\n elif check_num == 3:\n price = price + 10\n print(\"YOU WON \" + str(price) + \"ILS\")\n else:\n print(\"NEXT TIME\")\n\n print(\"calculating your price...\")\n sleep(3)\n print(\"your price : \" + str(price) + \"ILS\")\n\n\n\nlottery_game()\n\n\n","sub_path":"lesson_1/PYTHON_LAB.py","file_name":"PYTHON_LAB.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"533857715","text":"\"\"\"Implement bilinear interpolation\n\"\"\"\n\nimport numpy\nfrom source.interpolation2d import check_inputs\nfrom source.utilities import combine_coordinates, nanallclose\n\n\ndef interpolate2d(x, y, Z, points):\n \"\"\"Fundamental 2D interpolation routine\n\n Input\n x: 1D array of x-coordinates of the mesh on which to interpolate\n y: 1D array of y-coordinates of the mesh on which to interpolate\n Z: 2D array of values for each x, y pair\n points: Nx2 array of coordinates where interpolated values are sought\n\n Output\n 1D array with same length as points with interpolated values\n\n Notes\n Input coordinates x and y are assumed to be monotonically increasing,\n but need not be equidistantly spaced.\n\n Z is assumed to have dimension M x N, where M = len(x) and N = len(y).\n In other words it is assumed that the x values follow the first\n (vertical) axis downwards and y values the second (horizontal) axis\n from left to right.\n\n If this routine is to be used for interpolation of raster grids where\n data is typically organised with longitudes (x) going from left to\n right and latitudes (y) from left to right then user\n interpolate_raster in this module\n \"\"\"\n\n # Input checks\n x, y, Z, xi, eta = check_inputs(x, y, Z, points, 'linear', False)\n\n # Identify elements that are outside interpolation domain or NaN\n outside = (xi < x[0]) + (eta < y[0]) + (xi > x[-1]) + (eta > y[-1])\n outside += numpy.isnan(xi) + numpy.isnan(eta)\n\n inside = -outside\n xi = xi[inside]\n eta = eta[inside]\n\n # Find upper neighbours for each interpolation point\n idx = numpy.searchsorted(x, xi, side='left')\n idy = numpy.searchsorted(y, eta, side='left')\n\n # Get the four neighbours for each interpolation point\n x0 = x[idx - 1]\n x1 = x[idx]\n y0 = y[idy - 1]\n y1 = y[idy]\n\n z00 = Z[idx - 1, idy - 1]\n z01 = Z[idx - 1, idy]\n z10 = Z[idx, idy - 1]\n z11 = Z[idx, idy]\n\n # Coefficients for weighting between lower and upper bounds\n numpy.seterr(invalid='ignore') # Ignore division by zero\n alpha = (xi - x0) / (x1 - x0)\n beta = (eta - y0) / (y1 - y0)\n\n # Bilinear interpolation formula\n dx = z10 - z00\n dy = z01 - z00\n z = z00 + alpha * dx + beta * dy + alpha * beta * (z11 - dx - dy - z00)\n\n # Populate result with interpolated values for points inside domain\n # and NaN for values outside\n r = 
numpy.zeros(len(points))\n r[inside] = z\n r[outside] = numpy.nan\n\n return r\n\n\n\ndef linear_function(x, y):\n \"\"\"Auxiliary function for use with interpolation test\n \"\"\"\n\n return x + y / 2.0\n\n\nif __name__ == '__main__':\n\n # ------------------------------------------------------------\n # Interpolation library works for linear function - basic test\n # ------------------------------------------------------------\n\n # Define pixel centers along each direction\n x = [1.0, 2.0, 4.0]\n y = [5.0, 9.0]\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Test first that original points are reproduced correctly\n for i, xi in enumerate(x):\n for j, eta in enumerate(y):\n val = interpolate2d(x, y, A, [(xi, eta)])[0]\n ref = linear_function(xi, eta)\n assert numpy.allclose(val, ref, rtol=1e-12, atol=1e-12)\n\n # Then test that genuinly interpolated points are correct\n xis = numpy.linspace(x[0], x[-1], 10)\n etas = numpy.linspace(y[0], y[-1], 10)\n points = combine_coordinates(xis, etas)\n\n vals = interpolate2d(x, y, A, points)\n refs = linear_function(points[:, 0], points[:, 1])\n assert numpy.allclose(vals, refs, rtol=1e-12, atol=1e-12)\n\n #-------------------------------------------------------\n # Interpolation library works with grid points being NaN\n #-------------------------------------------------------\n\n # Define pixel centers along each direction\n x = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]\n y = [4.0, 5.0, 7.0, 9.0, 11.0, 13.0]\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n A[2, 3] = numpy.nan # (x=2.0, y=9.0): NaN\n\n # Then test that interpolated points can contain NaN\n xis = numpy.linspace(x[0], x[-1], 12)\n etas = numpy.linspace(y[0], y[-1], 10)\n points = combine_coordinates(xis, etas)\n\n vals = interpolate2d(x, y, A, points)\n refs = linear_function(points[:, 0], points[:, 1])\n\n # Set reference result with expected NaNs and compare\n for i, (xi, eta) in enumerate(points):\n if (1.0 < xi <= 3.0) and (7.0 < eta <= 11.0):\n refs[i] = numpy.nan\n\n assert nanallclose(vals, refs, rtol=1e-12, atol=1e-12)\n\n #-------------------------------------\n # Interpolation library works with NaN\n #-------------------------------------\n\n # Define pixel centers along each direction\n x = numpy.arange(20) * 1.0\n y = numpy.arange(25) * 1.0\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define arbitrary values for each x, y pair\n numpy.random.seed(17)\n A = numpy.random.random((len(x), len(y))) * 10\n\n # Create islands of NaN\n A[5, 13] = numpy.nan\n A[6, 14] = A[6, 18] = numpy.nan\n A[7, 14:18] = numpy.nan\n A[8, 13:18] = numpy.nan\n A[9, 12:19] = numpy.nan\n A[10, 14:17] = numpy.nan\n A[11, 15] = numpy.nan\n\n A[15, 5:6] = numpy.nan\n\n # Create interpolation points\n xis = numpy.linspace(x[0], x[-1], 39) # Hit all mid points\n etas = numpy.linspace(y[0], y[-1], 73) # Hit thirds\n points = combine_coordinates(xis, etas)\n\n vals = interpolate2d(x, y, A, points)\n\n # Calculate reference result with expected NaNs and compare\n i = j = 0\n for k, (xi, eta) in enumerate(points):\n\n # Find indices of nearest higher value in x and y\n i = 
numpy.searchsorted(x, xi)\n j = numpy.searchsorted(y, eta)\n\n if i > 0 and j > 0:\n\n # Get four neigbours\n A00 = A[i - 1, j - 1]\n A01 = A[i - 1, j]\n A10 = A[i, j - 1]\n A11 = A[i, j]\n\n if numpy.allclose(xi, x[i]):\n alpha = 1.0\n else:\n alpha = 0.5\n\n if numpy.allclose(eta, y[j]):\n beta = 1.0\n else:\n beta = eta - y[j - 1]\n\n\n if numpy.any(numpy.isnan([A00, A01, A10, A11])):\n ref = numpy.nan\n else:\n ref = (A00 * (1 - alpha) * (1 - beta) +\n A01 * (1 - alpha) * beta +\n A10 * alpha * (1 - beta) +\n A11 * alpha * beta)\n\n #print i, j, xi, eta, alpha, beta, vals[k], ref\n assert nanallclose(vals[k], ref, rtol=1e-12, atol=1e-12)\n\n #-----------------------------------------------------------------\n # Interpolation library sensibly handles values outside the domain\n #-----------------------------------------------------------------\n\n # Define pixel centers along each direction\n x = [1.0, 2.0, 4.0]\n y = [5.0, 9.0]\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Simple example first for debugging\n xis = numpy.linspace(0.9, 4.0, 4)\n etas = numpy.linspace(5, 9.1, 3)\n points = combine_coordinates(xis, etas)\n refs = linear_function(points[:, 0], points[:, 1])\n\n vals = interpolate2d(x, y, A, points)\n msg = ('Length of interpolation points %i differs from length '\n 'of interpolated values %i' % (len(points), len(vals)))\n assert len(points) == len(vals), msg\n for i, (xi, eta) in enumerate(points):\n if xi < x[0] or xi > x[-1] or eta < y[0] or eta > y[-1]:\n assert numpy.isnan(vals[i])\n else:\n msg = ('Got %.15f for (%f, %f), expected %.15f'\n % (vals[i], xi, eta, refs[i]))\n assert numpy.allclose(vals[i], refs[i],\n rtol=1.0e-12, atol=1.0e-12), msg\n\n # Try a range of combinations of points outside domain with\n # error_bounds False\n for lox in [x[0], x[0] - 1, x[0] - 10]:\n for hix in [x[-1], x[-1] + 1, x[-1] + 5]:\n for loy in [y[0], y[0] - 1, y[0] - 10]:\n for hiy in [y[-1], y[-1] + 1, y[-1] + 10]:\n\n # Then test that points outside domain can be handled\n xis = numpy.linspace(lox, hix, 10)\n etas = numpy.linspace(loy, hiy, 10)\n points = combine_coordinates(xis, etas)\n refs = linear_function(points[:, 0], points[:, 1])\n vals = interpolate2d(x, y, A, points)\n\n assert len(points) == len(vals), msg\n for i, (xi, eta) in enumerate(points):\n if xi < x[0] or xi > x[-1] or\\\n eta < y[0] or eta > y[-1]:\n msg = 'Expected NaN for %f, %f' % (xi, eta)\n assert numpy.isnan(vals[i]), msg\n else:\n msg = ('Got %.15f for (%f, %f), expected '\n '%.15f' % (vals[i], xi, eta, refs[i]))\n assert numpy.allclose(vals[i], refs[i],\n rtol=1.0e-12,\n atol=1.0e-12), msg\n\n #-------------------------------------------------------------\n # Interpolation library returns NaN for incomplete grid points\n #-------------------------------------------------------------\n\n # Define four pixel centers\n x = [2.0, 4.0]\n y = [5.0, 9.0]\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Test that interpolated points are correct\n xis = numpy.linspace(x[0], x[-1], 3)\n etas = numpy.linspace(y[0], y[-1], 3)\n points = combine_coordinates(xis, etas)\n\n # Interpolate to cropped grids\n for xc, yc, Ac in 
[([x[0]], [y[0]], numpy.array([[A[0, 0]]])), # 1 x 1\n ([x[0]], y, numpy.array([A[0, :]])), # 1 x 2\n ]:\n\n vals = interpolate2d(xc, yc, Ac, points)\n msg = 'Expected NaN when grid %s is incomplete' % str(Ac.shape)\n assert numpy.all(numpy.isnan(vals)), msg\n\n\n\n\n","sub_path":"scipy2012/geoprocessing_tutorial/solutions/exercise6.py","file_name":"exercise6.py","file_ext":"py","file_size_in_byte":11056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"330747968","text":"import os, re, json\nfrom datetime import datetime, timedelta\nimport confusables\n\nimport pendulum\n\nfrom corona import CZDS, WhoisDs, Corona\n\nUSERNAME = os.environ['USERNAME']\nPASSWORD = os.environ['PASSWORD']\n\nclass RunDomainData(object):\n\n _date = datetime.today() - timedelta(days=2)\n _save_path = './data/{folder}/{date}/'\n\n def __run_czds(self):\n print('Running CZDS')\n czds_save_path = self._save_path.format(folder='zone_files',date=self._date.strftime('%Y-%m-%d'))\n czds = CZDS(USERNAME,PASSWORD, save_path=czds_save_path).download()\n print('Completed CZDS')\n\n def __run_whoids(self):\n print('Running WHOIDS')\n whoisds_save_path = self._save_path.format(folder='whoisds_files',date=self._date.strftime('%Y-%m-%d'))\n whoisds = WhoisDs(date=pendulum.now('UTC').add(days=-2), save_path=whoisds_save_path).run()\n print('Completed WHOISDS')\n\n def __run_blacklist(self):\n print('Running Blacklist')\n blacklist_config = open('blacklist.config', 'r').read().split('\\n')\n blacklist_terms = []\n for item in blacklist_config:\n if len(item.strip()) > 0:\n blacklist_terms.append(item.strip())\n term_list = []\n for item in blacklist_terms:\n term_list.append({\n 'term': item,\n 'value': re.compile(confusables.confusable_regex(item, include_character_padding=False), re.IGNORECASE | re.UNICODE)\n })\n corona = Corona().generate(term_list)\n print('Completed Blacklist')\n\n def run(self):\n try:\n self.__run_czds()\n except:\n raise Exception('Error running CZDS')\n\n try:\n self.__run_whoids()\n except:\n raise Exception('Error running WHOISDS')\n\n try:\n self.__run_blacklist()\n except:\n raise Exception('Error running Blacklist')\n \nRunDomainData().run()","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"314555464","text":"############################################\n## Fibonacci, Quantas Chamadas?\n## Para a solução ser aceita, eu rodei o programa com as respostas, e armzenei tudo em listas.\n## Antes de fazer isso, estava dando erro Time Limit Exceeded\n## URI ONLINE - Problema 1029 - Iniciante\n##\n## Created by Jon in 19.06.2016\n##\t\n## Python 3\n\n\ndef calls(n):\n\tif n==0:\n\t\treturn 1\n\telif n==1:\n\t\treturn 1\n\n\treturn calls(n-1) + calls(n-2) + 1\n\ndef fib(n):\n\tif n==0:\n\t\treturn 0\n\telif n==1:\n\t\treturn 1\n\n\treturn fib(n-1) + fib(n-2)\n\n\n# Casos de teste\nqtd_testes = int(input())\n\nfor y in range(qtd_testes):\n\tvalor = int(input())\n\n\tprint (\"fib(%d) = %d calls = %d\" %(valor, calls(valor) - 1, fib(valor)))","sub_path":"ProgramasPython/URI.Problems/problem1029.py","file_name":"problem1029.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"375329974","text":"#!/usr/bin/env python\n\n\ndef fe(x, y, debug=False):\n p = 1\n s = x\n r = y\n\n count = 0\n while (r > 0):\n print('%s: %r mod 2 == 
%s' % (count, r, r % 2))\n #print('%s: %s' % (count, s))\n if (r % 2 == 1):\n p = p * s\n s = s * s\n r = int(r / 2)\n count += 1\n if debug:\n print('\\tp=%s r=%s s=%s' % (p, r, s))\n\n return p\n\n\ndef main():\n x = 3\n y = 13\n res = fe(x,y, debug=True)\n print(res)\n\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"extras/C960/fast_exponentiation.py","file_name":"fast_exponentiation.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"634124397","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.4 (3310)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/treadmill/sproc/run.py\n# Compiled at: 2017-04-03 02:32:49\n# Size of source mod 2**32: 2072 bytes\n\"\"\"Runs the Treadmill application runner.\"\"\"\nimport signal, logging, os, click\nfrom .. import appmgr\nfrom .. import utils\nfrom .. import logcontext as lc\nfrom ..appmgr import run as app_run\nfrom ..appmgr import abort as app_abort\n_LOGGER = logging.getLogger(__name__)\n\ndef init():\n \"\"\"Top level command handler.\"\"\"\n\n @click.command()\n @click.option('--approot', type=click.Path(exists=True), envvar='TREADMILL_APPROOT', required=True)\n @click.argument('container_dir', type=click.Path(exists=True))\n def run(approot, container_dir):\n \"\"\"Runs container given a container dir.\"\"\"\n with lc.LogContext(_LOGGER, os.path.basename(container_dir), lc.ContainerAdapter) as (log):\n terminated = utils.make_signal_flag(signal.SIGTERM)\n try:\n try:\n log.logger.info('run %r %r', approot, container_dir)\n app_env = appmgr.AppEnvironment(approot)\n watchdog = app_run.create_watchdog(app_env, container_dir)\n app_run.apply_cgroup_limits(app_env, container_dir)\n if not terminated:\n app_run.run(app_env, container_dir, watchdog, terminated)\n except Exception as exc:\n if not terminated:\n log.critical('Failed to start, app will be aborted.', exc_info=True)\n app_abort.flag_aborted(app_env, container_dir, exc)\n else:\n log.logger.info('Exception while handling term, ignore.', exc_info=True)\n\n finally:\n watchdog.remove()\n\n return run","sub_path":"pycfiles/Treadmill-0.0.2-py3.4/run.cpython-34.py","file_name":"run.cpython-34.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"536559649","text":"# Import pygame module\r\nimport pygame\r\n\r\n# Initialize pygame\r\npygame.init()\r\n\r\nwhite = (255,255,255)\r\nred = (255,0,0)\r\n\r\nscreen = pygame.display.set_mode((800,500))\r\n\r\nx = 0\r\ny = 0\r\n\r\nmove_x = 0\r\nmove_y = 0\r\n\r\nclock = pygame.time.Clock()\r\nFPS = 40\r\n\r\nwhile True:\r\n\r\n # Event Handling\r\n for event in pygame.event.get():\r\n #print(event)\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_RIGHT:\r\n move_x = +5\r\n move_y = 0\r\n if event.key == pygame.K_LEFT:\r\n move_x = -5\r\n move_y = 0\r\n if event.key == pygame.K_UP:\r\n move_y = -5\r\n move_x = 0\r\n if event.key == pygame.K_DOWN:\r\n move_y = +5\r\n move_x = 0\r\n\r\n if event.type == pygame.KEYUP:\r\n move_x = 0\r\n move_y = 0\r\n\r\n screen.fill(white)\r\n\r\n pygame.draw.rect(screen, red,[x,y,50,50])\r\n\r\n x += move_x\r\n y += move_y\r\n\r\n pygame.display.update()\r\n 
clock.tick(FPS)\r\n","sub_path":"CorePython/GameDevelopment/09-GameDevelopment/05-MovingProperly.py","file_name":"05-MovingProperly.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"108718909","text":"#!/usr/bin/env python\n#\n# Copyright 2015-2021 Blizzard Entertainment. Subject to the MIT license.\n# See the included LICENSE file for more information.\n#\n\nimport six\n\nfrom heroprotocol.decoders import CorruptedError, BitPackedBuffer, BitPackedDecoder, VersionedDecoder\n\n\n# Decoding instructions for each protocol type.\ntypeinfos = [\n ('_int',[(0,7)]), #0\n ('_int',[(0,4)]), #1\n ('_int',[(0,5)]), #2\n ('_int',[(0,6)]), #3\n ('_int',[(0,14)]), #4\n ('_int',[(0,22)]), #5\n ('_int',[(0,32)]), #6\n ('_choice',[(0,2),{0:('m_uint6',3),1:('m_uint14',4),2:('m_uint22',5),3:('m_uint32',6)}]), #7\n ('_struct',[[('m_userId',2,-1)]]), #8\n ('_blob',[(0,8)]), #9\n ('_int',[(0,8)]), #10\n ('_struct',[[('m_flags',10,0),('m_major',10,1),('m_minor',10,2),('m_revision',10,3),('m_build',6,4),('m_baseBuild',6,5)]]), #11\n ('_int',[(0,3)]), #12\n ('_bool',[]), #13\n ('_array',[(16,0),10]), #14\n ('_optional',[14]), #15\n ('_blob',[(16,0)]), #16\n ('_struct',[[('m_dataDeprecated',15,0),('m_data',16,1)]]), #17\n ('_struct',[[('m_signature',9,0),('m_version',11,1),('m_type',12,2),('m_elapsedGameLoops',6,3),('m_useScaledTime',13,4),('m_ngdpRootKey',17,5),('m_dataBuildNum',6,6),('m_replayCompatibilityHash',17,7)]]), #18\n ('_fourcc',[]), #19\n ('_blob',[(0,7)]), #20\n ('_int',[(0,64)]), #21\n ('_struct',[[('m_region',10,0),('m_programId',19,1),('m_realm',6,2),('m_name',20,3),('m_id',21,4)]]), #22\n ('_struct',[[('m_a',10,0),('m_r',10,1),('m_g',10,2),('m_b',10,3)]]), #23\n ('_int',[(0,2)]), #24\n ('_optional',[10]), #25\n ('_struct',[[('m_name',9,0),('m_toon',22,1),('m_race',9,2),('m_color',23,3),('m_control',10,4),('m_teamId',1,5),('m_handicap',0,6),('m_observe',24,7),('m_result',24,8),('m_workingSetSlotId',25,9),('m_hero',9,10)]]), #26\n ('_array',[(0,5),26]), #27\n ('_optional',[27]), #28\n ('_blob',[(0,10)]), #29\n ('_blob',[(0,11)]), #30\n ('_struct',[[('m_file',30,0)]]), #31\n ('_optional',[13]), #32\n ('_int',[(-9223372036854775808,64)]), #33\n ('_blob',[(0,12)]), #34\n ('_blob',[(40,0)]), #35\n ('_array',[(0,6),35]), #36\n ('_optional',[36]), #37\n ('_array',[(0,6),30]), #38\n ('_optional',[38]), #39\n ('_struct',[[('m_playerList',28,0),('m_title',29,1),('m_difficulty',9,2),('m_thumbnail',31,3),('m_isBlizzardMap',13,4),('m_restartAsTransitionMap',32,16),('m_timeUTC',33,5),('m_timeLocalOffset',33,6),('m_description',34,7),('m_imageFilePath',30,8),('m_campaignIndex',10,15),('m_mapFileName',30,9),('m_cacheHandles',37,10),('m_miniSave',13,11),('m_gameSpeed',12,12),('m_defaultDifficulty',3,13),('m_modPaths',39,14)]]), #40\n ('_optional',[9]), #41\n ('_optional',[35]), #42\n ('_optional',[6]), #43\n ('_struct',[[('m_race',25,-1)]]), #44\n ('_struct',[[('m_team',25,-1)]]), #45\n ('_blob',[(0,9)]), #46\n ('_struct',[[('m_name',9,-20),('m_clanTag',41,-19),('m_clanLogo',42,-18),('m_highestLeague',25,-17),('m_combinedRaceLevels',43,-16),('m_randomSeed',6,-15),('m_racePreference',44,-14),('m_teamPreference',45,-13),('m_testMap',13,-12),('m_testAuto',13,-11),('m_examine',13,-10),('m_customInterface',13,-9),('m_testType',6,-8),('m_observe',24,-7),('m_hero',46,-6),('m_skin',46,-5),('m_mount',46,-4),('m_banner',46,-3),('m_spray',46,-2),('m_toonHandle',20,-1)]]), #47\n ('_array',[(0,5),47]), #48\n 
('_struct',[[('m_lockTeams',13,-16),('m_teamsTogether',13,-15),('m_advancedSharedControl',13,-14),('m_randomRaces',13,-13),('m_battleNet',13,-12),('m_amm',13,-11),('m_competitive',13,-10),('m_practice',13,-9),('m_cooperative',13,-8),('m_noVictoryOrDefeat',13,-7),('m_heroDuplicatesAllowed',13,-6),('m_fog',24,-5),('m_observers',24,-4),('m_userDifficulty',24,-3),('m_clientDebugFlags',21,-2),('m_ammId',43,-1)]]), #49\n ('_int',[(1,4)]), #50\n ('_int',[(1,8)]), #51\n ('_bitarray',[(0,6)]), #52\n ('_bitarray',[(0,8)]), #53\n ('_bitarray',[(0,2)]), #54\n ('_bitarray',[(0,7)]), #55\n ('_struct',[[('m_allowedColors',52,-6),('m_allowedRaces',53,-5),('m_allowedDifficulty',52,-4),('m_allowedControls',53,-3),('m_allowedObserveTypes',54,-2),('m_allowedAIBuilds',55,-1)]]), #56\n ('_array',[(0,5),56]), #57\n ('_struct',[[('m_randomValue',6,-26),('m_gameCacheName',29,-25),('m_gameOptions',49,-24),('m_gameSpeed',12,-23),('m_gameType',12,-22),('m_maxUsers',2,-21),('m_maxObservers',2,-20),('m_maxPlayers',2,-19),('m_maxTeams',50,-18),('m_maxColors',3,-17),('m_maxRaces',51,-16),('m_maxControls',10,-15),('m_mapSizeX',10,-14),('m_mapSizeY',10,-13),('m_mapFileSyncChecksum',6,-12),('m_mapFileName',30,-11),('m_mapAuthorName',9,-10),('m_modFileSyncChecksum',6,-9),('m_slotDescriptions',57,-8),('m_defaultDifficulty',3,-7),('m_defaultAIBuild',0,-6),('m_cacheHandles',36,-5),('m_hasExtensionMod',13,-4),('m_isBlizzardMap',13,-3),('m_isPremadeFFA',13,-2),('m_isCoopMode',13,-1)]]), #58\n ('_optional',[1]), #59\n ('_optional',[2]), #60\n ('_struct',[[('m_color',60,-1)]]), #61\n ('_array',[(0,4),46]), #62\n ('_array',[(0,17),6]), #63\n ('_struct',[[('m_hero',19,-2),('m_tier',10,-1)]]), #64\n ('_array',[(0,10),64]), #65\n ('_struct',[[('m_control',10,-24),('m_userId',59,-23),('m_teamId',1,-22),('m_colorPref',61,-21),('m_racePref',44,-20),('m_difficulty',3,-19),('m_aiBuild',0,-18),('m_handicap',0,-17),('m_observe',24,-16),('m_logoIndex',6,-15),('m_hero',46,-14),('m_skin',46,-13),('m_mount',46,-12),('m_artifacts',62,-11),('m_workingSetSlotId',25,-10),('m_rewards',63,-9),('m_toonHandle',20,-8),('m_tandemLeaderUserId',59,-7),('m_hasSilencePenalty',13,-6),('m_banner',46,-5),('m_spray',46,-4),('m_announcerPack',46,-3),('m_voiceLine',46,-2),('m_heroMasteryTiers',65,-1)]]), #66\n ('_array',[(0,5),66]), #67\n ('_struct',[[('m_phase',12,-11),('m_maxUsers',2,-10),('m_maxObservers',2,-9),('m_slots',67,-8),('m_randomSeed',6,-7),('m_hostUserId',59,-6),('m_isSinglePlayer',13,-5),('m_pickedMapTag',10,-4),('m_gameDuration',6,-3),('m_defaultDifficulty',3,-2),('m_defaultAIBuild',0,-1)]]), #68\n ('_struct',[[('m_userInitialData',48,-3),('m_gameDescription',58,-2),('m_lobbyState',68,-1)]]), #69\n ('_struct',[[('m_syncLobbyState',69,-1)]]), #70\n ('_struct',[[('m_name',20,-1)]]), #71\n ('_blob',[(0,6)]), #72\n ('_struct',[[('m_name',72,-1)]]), #73\n ('_struct',[[('m_name',72,-3),('m_type',6,-2),('m_data',20,-1)]]), #74\n ('_struct',[[('m_type',6,-3),('m_name',72,-2),('m_data',34,-1)]]), #75\n ('_array',[(0,5),10]), #76\n ('_struct',[[('m_signature',76,-2),('m_toonHandle',20,-1)]]), #77\n ('_struct',[[('m_gameFullyDownloaded',13,-14),('m_developmentCheatsEnabled',13,-13),('m_testCheatsEnabled',13,-12),('m_multiplayerCheatsEnabled',13,-11),('m_syncChecksummingEnabled',13,-10),('m_isMapToMapTransition',13,-9),('m_debugPauseEnabled',13,-8),('m_useGalaxyAsserts',13,-7),('m_platformMac',13,-6),('m_cameraFollow',13,-5),('m_baseBuildNum',6,-4),('m_buildNum',6,-3),('m_versionFlags',6,-2),('m_hotkeyProfile',46,-1)]]), #78\n ('_struct',[[]]), #79\n 
('_int',[(0,16)]), #80\n ('_struct',[[('x',80,-2),('y',80,-1)]]), #81\n ('_struct',[[('m_which',12,-2),('m_target',81,-1)]]), #82\n ('_struct',[[('m_fileName',30,-5),('m_automatic',13,-4),('m_overwrite',13,-3),('m_name',9,-2),('m_description',29,-1)]]), #83\n ('_int',[(1,32)]), #84\n ('_struct',[[('m_sequence',84,-1)]]), #85\n ('_null',[]), #86\n ('_int',[(0,20)]), #87\n ('_int',[(-2147483648,32)]), #88\n ('_struct',[[('x',87,-3),('y',87,-2),('z',88,-1)]]), #89\n ('_struct',[[('m_targetUnitFlags',80,-7),('m_timer',10,-6),('m_tag',6,-5),('m_snapshotUnitLink',80,-4),('m_snapshotControlPlayerId',59,-3),('m_snapshotUpkeepPlayerId',59,-2),('m_snapshotPoint',89,-1)]]), #90\n ('_choice',[(0,2),{0:('None',86),1:('TargetPoint',89),2:('TargetUnit',90)}]), #91\n ('_struct',[[('m_target',91,-4),('m_time',88,-3),('m_verb',29,-2),('m_arguments',29,-1)]]), #92\n ('_struct',[[('m_data',92,-1)]]), #93\n ('_int',[(0,26)]), #94\n ('_struct',[[('m_abilLink',80,-3),('m_abilCmdIndex',2,-2),('m_abilCmdData',25,-1)]]), #95\n ('_optional',[95]), #96\n ('_choice',[(0,2),{0:('None',86),1:('TargetPoint',89),2:('TargetUnit',90),3:('Data',6)}]), #97\n ('_optional',[89]), #98\n ('_struct',[[('m_cmdFlags',94,-7),('m_abil',96,-6),('m_data',97,-5),('m_vector',98,-4),('m_sequence',84,-3),('m_otherUnit',43,-2),('m_unitGroup',43,-1)]]), #99\n ('_array',[(0,6),2]), #100\n ('_choice',[(0,2),{0:('None',86),1:('Mask',52),2:('OneIndices',100),3:('ZeroIndices',100)}]), #101\n ('_struct',[[('m_unitLink',80,-4),('m_subgroupPriority',10,-3),('m_intraSubgroupPriority',10,-2),('m_count',3,-1)]]), #102\n ('_array',[(0,6),102]), #103\n ('_array',[(0,6),6]), #104\n ('_struct',[[('m_subgroupIndex',2,-4),('m_removeMask',101,-3),('m_addSubgroups',103,-2),('m_addUnitTags',104,-1)]]), #105\n ('_struct',[[('m_controlGroupId',1,-2),('m_delta',105,-1)]]), #106\n ('_struct',[[('m_controlGroupIndex',1,-3),('m_controlGroupUpdate',12,-2),('m_mask',101,-1)]]), #107\n ('_struct',[[('m_count',3,-6),('m_subgroupCount',3,-5),('m_activeSubgroupIndex',2,-4),('m_unitTagsChecksum',6,-3),('m_subgroupIndicesChecksum',6,-2),('m_subgroupsChecksum',6,-1)]]), #108\n ('_struct',[[('m_controlGroupId',1,-2),('m_selectionSyncData',108,-1)]]), #109\n ('_struct',[[('m_chatMessage',29,-1)]]), #110\n ('_struct',[[('m_speed',12,-1)]]), #111\n ('_int',[(-128,8)]), #112\n ('_struct',[[('m_delta',112,-1)]]), #113\n ('_struct',[[('x',88,-2),('y',88,-1)]]), #114\n ('_struct',[[('m_point',114,-4),('m_unit',6,-3),('m_pingedMinimap',13,-2),('m_option',88,-1)]]), #115\n ('_struct',[[('m_verb',29,-2),('m_arguments',29,-1)]]), #116\n ('_struct',[[('m_alliance',6,-2),('m_control',6,-1)]]), #117\n ('_struct',[[('m_unitTag',6,-1)]]), #118\n ('_struct',[[('m_unitTag',6,-2),('m_flags',10,-1)]]), #119\n ('_struct',[[('m_conversationId',88,-2),('m_replyId',88,-1)]]), #120\n ('_optional',[20]), #121\n ('_struct',[[('m_gameUserId',1,-6),('m_observe',24,-5),('m_name',9,-4),('m_toonHandle',121,-3),('m_clanTag',41,-2),('m_clanLogo',42,-1)]]), #122\n ('_array',[(0,5),122]), #123\n ('_int',[(0,1)]), #124\n ('_struct',[[('m_userInfos',123,-2),('m_method',124,-1)]]), #125\n ('_struct',[[('m_button',80,-2),('m_metaKeyFlags',80,-1)]]), #126\n ('_choice',[(0,3),{0:('None',86),1:('Checked',13),2:('ValueChanged',6),3:('SelectionChanged',88),4:('TextChanged',30),5:('MouseEvent',126)}]), #127\n ('_struct',[[('m_controlId',88,-3),('m_eventType',88,-2),('m_eventData',127,-1)]]), #128\n ('_struct',[[('m_soundHash',6,-2),('m_length',6,-1)]]), #129\n ('_array',[(0,7),6]), #130\n 
('_struct',[[('m_soundHash',130,-2),('m_length',130,-1)]]), #131\n ('_struct',[[('m_syncInfo',131,-1)]]), #132\n ('_struct',[[('m_queryId',80,-3),('m_lengthMs',6,-2),('m_finishGameLoop',6,-1)]]), #133\n ('_struct',[[('m_queryId',80,-2),('m_lengthMs',6,-1)]]), #134\n ('_struct',[[('m_animWaitQueryId',80,-1)]]), #135\n ('_struct',[[('m_sound',6,-1)]]), #136\n ('_struct',[[('m_transmissionId',88,-2),('m_thread',6,-1)]]), #137\n ('_struct',[[('m_transmissionId',88,-1)]]), #138\n ('_optional',[81]), #139\n ('_optional',[80]), #140\n ('_optional',[112]), #141\n ('_struct',[[('m_target',139,-6),('m_distance',140,-5),('m_pitch',140,-4),('m_yaw',140,-3),('m_reason',141,-2),('m_follow',13,-1)]]), #142\n ('_struct',[[('m_skipType',124,-1)]]), #143\n ('_int',[(0,11)]), #144\n ('_struct',[[('x',144,-2),('y',144,-1)]]), #145\n ('_struct',[[('m_button',6,-5),('m_down',13,-4),('m_posUI',145,-3),('m_posWorld',89,-2),('m_flags',112,-1)]]), #146\n ('_struct',[[('m_posUI',145,-3),('m_posWorld',89,-2),('m_flags',112,-1)]]), #147\n ('_struct',[[('m_achievementLink',80,-1)]]), #148\n ('_struct',[[('m_hotkey',6,-2),('m_down',13,-1)]]), #149\n ('_struct',[[('m_abilLink',80,-3),('m_abilCmdIndex',2,-2),('m_state',112,-1)]]), #150\n ('_struct',[[('m_soundtrack',6,-1)]]), #151\n ('_struct',[[('m_key',112,-2),('m_flags',112,-1)]]), #152\n ('_struct',[[('m_error',88,-2),('m_abil',96,-1)]]), #153\n ('_int',[(0,19)]), #154\n ('_struct',[[('m_decrementMs',154,-1)]]), #155\n ('_struct',[[('m_portraitId',88,-1)]]), #156\n ('_struct',[[('m_functionName',20,-1)]]), #157\n ('_struct',[[('m_result',88,-1)]]), #158\n ('_struct',[[('m_gameMenuItemIndex',88,-1)]]), #159\n ('_int',[(-32768,16)]), #160\n ('_struct',[[('m_wheelSpin',160,-2),('m_flags',112,-1)]]), #161\n ('_struct',[[('m_button',80,-1)]]), #162\n ('_struct',[[('m_cutsceneId',88,-2),('m_bookmarkName',20,-1)]]), #163\n ('_struct',[[('m_cutsceneId',88,-1)]]), #164\n ('_struct',[[('m_cutsceneId',88,-3),('m_conversationLine',20,-2),('m_altConversationLine',20,-1)]]), #165\n ('_struct',[[('m_cutsceneId',88,-2),('m_conversationLine',20,-1)]]), #166\n ('_struct',[[('m_leaveReason',2,-1)]]), #167\n ('_struct',[[('m_observe',24,-7),('m_name',9,-6),('m_toonHandle',121,-5),('m_clanTag',41,-4),('m_clanLogo',42,-3),('m_hijack',13,-2),('m_hijackCloneGameUserId',59,-1)]]), #168\n ('_optional',[84]), #169\n ('_struct',[[('m_state',24,-2),('m_sequence',169,-1)]]), #170\n ('_struct',[[('m_sequence',169,-2),('m_target',89,-1)]]), #171\n ('_struct',[[('m_sequence',169,-2),('m_target',90,-1)]]), #172\n ('_struct',[[('m_catalog',10,-4),('m_entry',80,-3),('m_field',9,-2),('m_value',9,-1)]]), #173\n ('_struct',[[('m_index',6,-1)]]), #174\n ('_struct',[[('m_shown',13,-1)]]), #175\n ('_struct',[[('m_recipient',12,-2),('m_string',30,-1)]]), #176\n ('_struct',[[('m_recipient',12,-2),('m_point',114,-1)]]), #177\n ('_struct',[[('m_progress',88,-1)]]), #178\n ('_struct',[[('m_status',24,-1)]]), #179\n ('_struct',[[('m_abilLink',80,-3),('m_abilCmdIndex',2,-2),('m_buttonLink',80,-1)]]), #180\n ('_struct',[[('m_behaviorLink',80,-2),('m_buttonLink',80,-1)]]), #181\n ('_choice',[(0,2),{0:('None',86),1:('Ability',180),2:('Behavior',181),3:('Vitals',160)}]), #182\n ('_struct',[[('m_announcement',182,-4),('m_announceLink',80,-3),('m_otherUnitTag',6,-2),('m_unitTag',6,-1)]]), #183\n ('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',29,2),('m_controlPlayerId',1,3),('m_upkeepPlayerId',1,4),('m_x',10,5),('m_y',10,6)]]), #184\n 
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_x',10,2),('m_y',10,3)]]), #185\n ('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_killerPlayerId',59,2),('m_x',10,3),('m_y',10,4),('m_killerUnitTagIndex',43,5),('m_killerUnitTagRecycle',43,6)]]), #186\n ('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_controlPlayerId',1,2),('m_upkeepPlayerId',1,3)]]), #187\n ('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',29,2)]]), #188\n ('_struct',[[('m_playerId',1,0),('m_upgradeTypeName',29,1),('m_count',88,2)]]), #189\n ('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1)]]), #190\n ('_array',[(0,10),88]), #191\n ('_struct',[[('m_firstUnitIndex',6,0),('m_items',191,1)]]), #192\n ('_struct',[[('m_playerId',1,0),('m_type',6,1),('m_userId',43,2),('m_slotId',43,3)]]), #193\n ('_struct',[[('m_key',29,0)]]), #194\n ('_struct',[[('__parent',194,0),('m_value',29,1)]]), #195\n ('_array',[(0,6),195]), #196\n ('_optional',[196]), #197\n ('_struct',[[('__parent',194,0),('m_value',88,1)]]), #198\n ('_array',[(0,6),198]), #199\n ('_optional',[199]), #200\n ('_struct',[[('m_eventName',29,0),('m_stringData',197,1),('m_intData',200,2),('m_fixedData',200,3)]]), #201\n ('_struct',[[('m_value',6,0),('m_time',6,1)]]), #202\n ('_array',[(0,6),202]), #203\n ('_array',[(0,5),203]), #204\n ('_struct',[[('m_name',29,0),('m_values',204,1)]]), #205\n ('_array',[(0,21),205]), #206\n ('_struct',[[('m_instanceList',206,0)]]), #207\n ('_struct',[[('m_hero',29,0),('m_controllingTeam',6,1)]]), #208\n ('_struct',[[('m_hero',29,0),('m_controllingPlayer',6,1)]]), #209\n ('_struct',[[('m_hero',29,0),('m_newControllingPlayer',6,1)]]), #210\n]\n\n# Map from protocol NNet.Game.*Event eventid to (typeid, name)\ngame_event_types = {\n 5: (79, 'NNet.Game.SUserFinishedLoadingSyncEvent'),\n 7: (78, 'NNet.Game.SUserOptionsEvent'),\n 9: (71, 'NNet.Game.SBankFileEvent'),\n 10: (73, 'NNet.Game.SBankSectionEvent'),\n 11: (74, 'NNet.Game.SBankKeyEvent'),\n 12: (75, 'NNet.Game.SBankValueEvent'),\n 13: (77, 'NNet.Game.SBankSignatureEvent'),\n 14: (82, 'NNet.Game.SCameraSaveEvent'),\n 21: (83, 'NNet.Game.SSaveGameEvent'),\n 22: (79, 'NNet.Game.SSaveGameDoneEvent'),\n 23: (79, 'NNet.Game.SLoadGameDoneEvent'),\n 25: (85, 'NNet.Game.SCommandManagerResetEvent'),\n 26: (93, 'NNet.Game.SGameCheatEvent'),\n 27: (99, 'NNet.Game.SCmdEvent'),\n 28: (106, 'NNet.Game.SSelectionDeltaEvent'),\n 29: (107, 'NNet.Game.SControlGroupUpdateEvent'),\n 30: (109, 'NNet.Game.SSelectionSyncCheckEvent'),\n 32: (110, 'NNet.Game.STriggerChatMessageEvent'),\n 34: (111, 'NNet.Game.SSetAbsoluteGameSpeedEvent'),\n 35: (113, 'NNet.Game.SAddAbsoluteGameSpeedEvent'),\n 36: (115, 'NNet.Game.STriggerPingEvent'),\n 37: (116, 'NNet.Game.SBroadcastCheatEvent'),\n 38: (117, 'NNet.Game.SAllianceEvent'),\n 39: (118, 'NNet.Game.SUnitClickEvent'),\n 40: (119, 'NNet.Game.SUnitHighlightEvent'),\n 41: (120, 'NNet.Game.STriggerReplySelectedEvent'),\n 43: (125, 'NNet.Game.SHijackReplayGameEvent'),\n 44: (79, 'NNet.Game.STriggerSkippedEvent'),\n 45: (129, 'NNet.Game.STriggerSoundLengthQueryEvent'),\n 46: (136, 'NNet.Game.STriggerSoundOffsetEvent'),\n 47: (137, 'NNet.Game.STriggerTransmissionOffsetEvent'),\n 48: (138, 'NNet.Game.STriggerTransmissionCompleteEvent'),\n 49: (142, 'NNet.Game.SCameraUpdateEvent'),\n 50: (79, 'NNet.Game.STriggerAbortMissionEvent'),\n 55: (128, 'NNet.Game.STriggerDialogControlEvent'),\n 56: (132, 'NNet.Game.STriggerSoundLengthSyncEvent'),\n 57: (143, 
'NNet.Game.STriggerConversationSkippedEvent'),\n 58: (146, 'NNet.Game.STriggerMouseClickedEvent'),\n 59: (147, 'NNet.Game.STriggerMouseMovedEvent'),\n 60: (148, 'NNet.Game.SAchievementAwardedEvent'),\n 61: (149, 'NNet.Game.STriggerHotkeyPressedEvent'),\n 62: (150, 'NNet.Game.STriggerTargetModeUpdateEvent'),\n 64: (151, 'NNet.Game.STriggerSoundtrackDoneEvent'),\n 66: (152, 'NNet.Game.STriggerKeyPressedEvent'),\n 67: (157, 'NNet.Game.STriggerMovieFunctionEvent'),\n 76: (153, 'NNet.Game.STriggerCommandErrorEvent'),\n 86: (79, 'NNet.Game.STriggerMovieStartedEvent'),\n 87: (79, 'NNet.Game.STriggerMovieFinishedEvent'),\n 88: (155, 'NNet.Game.SDecrementGameTimeRemainingEvent'),\n 89: (156, 'NNet.Game.STriggerPortraitLoadedEvent'),\n 90: (158, 'NNet.Game.STriggerCustomDialogDismissedEvent'),\n 91: (159, 'NNet.Game.STriggerGameMenuItemSelectedEvent'),\n 92: (161, 'NNet.Game.STriggerMouseWheelEvent'),\n 95: (162, 'NNet.Game.STriggerButtonPressedEvent'),\n 96: (79, 'NNet.Game.STriggerGameCreditsFinishedEvent'),\n 97: (163, 'NNet.Game.STriggerCutsceneBookmarkFiredEvent'),\n 98: (164, 'NNet.Game.STriggerCutsceneEndSceneFiredEvent'),\n 99: (165, 'NNet.Game.STriggerCutsceneConversationLineEvent'),\n 100: (166, 'NNet.Game.STriggerCutsceneConversationLineMissingEvent'),\n 101: (167, 'NNet.Game.SGameUserLeaveEvent'),\n 102: (168, 'NNet.Game.SGameUserJoinEvent'),\n 103: (170, 'NNet.Game.SCommandManagerStateEvent'),\n 104: (171, 'NNet.Game.SCmdUpdateTargetPointEvent'),\n 105: (172, 'NNet.Game.SCmdUpdateTargetUnitEvent'),\n 106: (133, 'NNet.Game.STriggerAnimLengthQueryByNameEvent'),\n 107: (134, 'NNet.Game.STriggerAnimLengthQueryByPropsEvent'),\n 108: (135, 'NNet.Game.STriggerAnimOffsetEvent'),\n 109: (173, 'NNet.Game.SCatalogModifyEvent'),\n 110: (174, 'NNet.Game.SHeroTalentTreeSelectedEvent'),\n 111: (79, 'NNet.Game.STriggerProfilerLoggingFinishedEvent'),\n 112: (175, 'NNet.Game.SHeroTalentTreeSelectionPanelToggledEvent'),\n}\n\n# The typeid of the NNet.Game.EEventId enum.\ngame_eventid_typeid = 0\n\n# Map from protocol NNet.Game.*Message eventid to (typeid, name)\nmessage_event_types = {\n 0: (176, 'NNet.Game.SChatMessage'),\n 1: (177, 'NNet.Game.SPingMessage'),\n 2: (178, 'NNet.Game.SLoadingProgressMessage'),\n 3: (79, 'NNet.Game.SServerPingMessage'),\n 4: (179, 'NNet.Game.SReconnectNotifyMessage'),\n 5: (183, 'NNet.Game.SPlayerAnnounceMessage'),\n}\n\n# The typeid of the NNet.Game.EMessageId enum.\nmessage_eventid_typeid = 1\n\n# Map from protocol NNet.Replay.Tracker.*Event eventid to (typeid, name)\ntracker_event_types = {\n 1: (184, 'NNet.Replay.Tracker.SUnitBornEvent'),\n 2: (186, 'NNet.Replay.Tracker.SUnitDiedEvent'),\n 3: (187, 'NNet.Replay.Tracker.SUnitOwnerChangeEvent'),\n 4: (188, 'NNet.Replay.Tracker.SUnitTypeChangeEvent'),\n 5: (189, 'NNet.Replay.Tracker.SUpgradeEvent'),\n 6: (184, 'NNet.Replay.Tracker.SUnitInitEvent'),\n 7: (190, 'NNet.Replay.Tracker.SUnitDoneEvent'),\n 8: (192, 'NNet.Replay.Tracker.SUnitPositionsEvent'),\n 9: (193, 'NNet.Replay.Tracker.SPlayerSetupEvent'),\n 10: (201, 'NNet.Replay.Tracker.SStatGameEvent'),\n 11: (207, 'NNet.Replay.Tracker.SScoreResultEvent'),\n 12: (185, 'NNet.Replay.Tracker.SUnitRevivedEvent'),\n 13: (208, 'NNet.Replay.Tracker.SHeroBannedEvent'),\n 14: (209, 'NNet.Replay.Tracker.SHeroPickedEvent'),\n 15: (210, 'NNet.Replay.Tracker.SHeroSwappedEvent'),\n}\n\n# The typeid of the NNet.Replay.Tracker.EEventId enum.\ntracker_eventid_typeid = 2\n\n# The typeid of NNet.SVarUint32 (the type used to encode gameloop deltas).\nsvaruint32_typeid = 7\n\n# The typeid of 
NNet.Replay.SGameUserId (the type used to encode player ids).\nreplay_userid_typeid = 8\n\n# The typeid of NNet.Replay.SHeader (the type used to store replay game version and length).\nreplay_header_typeid = 18\n\n# The typeid of NNet.Game.SDetails (the type used to store overall replay details).\ngame_details_typeid = 40\n\n# The typeid of NNet.Replay.SInitData (the type used to store the initial lobby).\nreplay_initdata_typeid = 70\n\n\ndef _varuint32_value(value):\n # Returns the numeric value from a SVarUint32 instance.\n for v in six.itervalues(value):\n return v\n return 0\n\n\ndef _decode_event_stream(decoder, eventid_typeid, event_types, decode_user_id):\n # Decodes events prefixed with a gameloop and possibly userid\n gameloop = 0\n while not decoder.done():\n start_bits = decoder.used_bits()\n\n # decode the gameloop delta before each event\n delta = _varuint32_value(decoder.instance(svaruint32_typeid))\n gameloop += delta\n\n # decode the userid before each event\n if decode_user_id:\n userid = decoder.instance(replay_userid_typeid)\n\n # decode the event id\n eventid = decoder.instance(eventid_typeid)\n typeid, typename = event_types.get(eventid, (None, None))\n if typeid is None:\n raise CorruptedError('eventid(%d) at %s' % (eventid, decoder))\n\n # decode the event struct instance\n event = decoder.instance(typeid)\n event['_event'] = typename\n event['_eventid'] = eventid\n\n # insert gameloop and userid\n event['_gameloop'] = gameloop\n if decode_user_id:\n event['_userid'] = userid\n\n # the next event is byte aligned\n decoder.byte_align()\n\n # insert bits used in stream\n event['_bits'] = decoder.used_bits() - start_bits\n\n yield event\n\n\ndef decode_replay_game_events(contents):\n \"\"\"Decodes and yields each game event from the contents byte string.\"\"\"\n decoder = BitPackedDecoder(contents, typeinfos)\n for event in _decode_event_stream(decoder,\n game_eventid_typeid,\n game_event_types,\n decode_user_id=True):\n yield event\n\n\ndef decode_replay_message_events(contents):\n \"\"\"Decodes and yields each message event from the contents byte string.\"\"\"\n decoder = BitPackedDecoder(contents, typeinfos)\n for event in _decode_event_stream(decoder,\n message_eventid_typeid,\n message_event_types,\n decode_user_id=True):\n yield event\n\n\ndef decode_replay_tracker_events(contents):\n \"\"\"Decodes and yields each tracker event from the contents byte string.\"\"\"\n decoder = VersionedDecoder(contents, typeinfos)\n for event in _decode_event_stream(decoder,\n tracker_eventid_typeid,\n tracker_event_types,\n decode_user_id=False):\n yield event\n\n\ndef decode_replay_header(contents):\n \"\"\"Decodes and return the replay header from the contents byte string.\"\"\"\n decoder = VersionedDecoder(contents, typeinfos)\n return decoder.instance(replay_header_typeid)\n\n\ndef decode_replay_details(contents):\n \"\"\"Decodes and returns the game details from the contents byte string.\"\"\"\n decoder = VersionedDecoder(contents, typeinfos)\n return decoder.instance(game_details_typeid)\n\n\ndef decode_replay_initdata(contents):\n \"\"\"Decodes and return the replay init data from the contents byte string.\"\"\"\n decoder = BitPackedDecoder(contents, typeinfos)\n return decoder.instance(replay_initdata_typeid)\n\n\ndef decode_replay_attributes_events(contents):\n \"\"\"Decodes and yields each attribute from the contents byte string.\"\"\"\n buffer = BitPackedBuffer(contents, 'little')\n attributes = {}\n if not buffer.done():\n attributes['source'] = buffer.read_bits(8)\n 
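# Wire layout, as read below: a 1-byte source, a 32-bit map namespace, a 32-bit\n        # attribute count (discarded), then one record per attribute: namespace (32\n        # bits), attrid (32 bits), scope (8 bits) and a 4-byte value, reversed and\n        # stripped of NUL padding, grouped under attributes['scopes'][scope][attrid].\n        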
attributes['mapNamespace'] = buffer.read_bits(32)\n _ = buffer.read_bits(32)\n attributes['scopes'] = {}\n while not buffer.done():\n value = {}\n value['namespace'] = buffer.read_bits(32)\n value['attrid'] = attrid = buffer.read_bits(32)\n scope = buffer.read_bits(8)\n value['value'] = buffer.read_aligned_bytes(4)[::-1].strip(b'\\x00')\n if not scope in attributes['scopes']:\n attributes['scopes'][scope] = {}\n if not attrid in attributes['scopes'][scope]:\n attributes['scopes'][scope][attrid] = []\n attributes['scopes'][scope][attrid].append(value)\n return attributes\n\n\ndef unit_tag(unitTagIndex, unitTagRecycle):\n return (unitTagIndex << 18) + unitTagRecycle\n\n\ndef unit_tag_index(unitTag):\n return (unitTag >> 18) & 0x00003fff\n\n\ndef unit_tag_recycle(unitTag):\n return (unitTag) & 0x0003ffff\n","sub_path":"heroprotocol/versions/protocol58209.py","file_name":"protocol58209.py","file_ext":"py","file_size_in_byte":26610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"546036961","text":"import asyncio\nimport pytest\n\nfrom aiopluggy import *\n\n\nhookspec = HookspecMarker(\"example\")\nhookimpl = HookimplMarker(\"example\")\n\n\n@pytest.mark.asyncio\n# @pytest.mark.filterwarnings('ignore:pm.register')\nasync def test_async(pm: PluginManager):\n out = []\n before = []\n\n class HookSpec(object):\n @hookspec\n def some_method(self, arg):\n pass\n\n class PluginBefore1(object):\n @hookimpl.before\n def some_method(self, arg):\n before.append(arg + 1)\n\n class PluginBefore2(object):\n @hookimpl.before\n async def some_method(self, arg):\n before.append(arg + 2)\n\n class Plugin1(object):\n @hookimpl\n async def some_method(self, arg):\n await asyncio.sleep(.1)\n out.append(arg + 1)\n return arg + 1\n\n class Plugin2(object):\n @hookimpl\n def some_method(self, arg):\n out.append(arg + 2)\n return arg + 2\n\n pm.register_specs(HookSpec())\n pm.register(PluginBefore1())\n pm.register(PluginBefore2())\n pm.register(Plugin1())\n pm.register(Plugin2())\n results = await pm.hooks.some_method(arg=0)\n values = {result.value for result in results}\n assert values == {1, 2}\n assert len(out) == len(values)\n assert set(out) == values\n assert len(before) == 2\n assert set(before) == {1, 2}\n\n\ndef test_sync(pm: PluginManager):\n out = []\n\n class HookSpec(object):\n @hookspec.sync\n def some_method(self, arg):\n pass\n\n class Plugin1(object):\n @hookimpl\n def some_method(self, arg):\n out.append(arg + 1)\n return arg + 1\n\n class Plugin2(object):\n @hookimpl.try_first\n def some_method(self, arg):\n out.append(arg + 2)\n return arg + 2\n\n class Plugin3(object):\n @hookimpl.try_last\n def some_method(self, arg):\n out.append(arg + 3)\n return arg + 3\n\n class Plugin4(object):\n @hookimpl.try_first\n def some_method(self, arg):\n out.append(arg + 4)\n return arg + 4\n\n class Plugin5(object):\n @hookimpl.try_last\n def some_method(self, arg):\n out.append(arg + 5)\n return arg + 5\n\n class Plugin6(object):\n @hookimpl\n def some_method(self, arg):\n out.append(arg + 6)\n return arg + 6\n\n pm.register_specs(HookSpec())\n pm.register(Plugin1())\n pm.register(Plugin2())\n pm.register(Plugin3())\n pm.register(Plugin4())\n pm.register(Plugin5())\n pm.register(Plugin6())\n results = pm.hooks.some_method(arg=0)\n values = [result.value for result in results]\n assert values == [4, 2, 6, 1, 3, 5]\n assert out == [4, 2, 6, 1, 3, 
5]\n","sub_path":"tests/multicall/test_multicall_parallel.py","file_name":"test_multicall_parallel.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"125072694","text":"# -*-coding:utf-8-*-\n\"\"\"\nObjectif : tester la classe Entity et ses méthodes\n\"\"\"\n\nfrom entity import *\n\nSchro = Entity(\"ennemy\",0,0) # Création de Schrodinger\nSchro.move() # Déplacement de Schrodinger\n\nn = 0\nwhile Schro.d == \"right\":\n # teste le nombre d'itérations pour que Schro. arrive à la fin de la ligne (x=100) et change de direction\n Schro.move()\n print(Schro.x, Schro.y) # affichage pour test\n print(Schro.d, Schro.a, Schro.v) # affichage pour test\n n += 1\nprint(n) # retourne le nombre d'itérations\n\n","sub_path":"test_entity.py","file_name":"test_entity.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"549891674","text":"import os\nimport zipfile\nimport utility\n\nfrom altoxml_parser import AltoXmlParser\n\ndef extract_documents(source_filename, pattern, line_break='\\n', page_break='\\n'):\n\n parser = AltoXmlParser(line_break=line_break, page_break=page_break)\n\n with zipfile.ZipFile(source_filename, 'r') as zf:\n\n for package_id, filenames in utility.zip_folder_glob(zf, pattern):\n\n xml_contents = (zf.read(filename) for filename in filenames)\n\n document_tokens = parser.document(xml_contents)\n\n document = ' '.join([ x for x in document_tokens ])\n\n yield package_id, document\n\ndef extract_corpus(source_filename, target_filename, pattern, line_break='\\n', page_break='\\n'):\n\n texts = (\n (\"{}.txt\".format(package_id), text)\n for package_id, text in\n extract_documents(source_filename, pattern, line_break=line_break, page_break=page_break)\n )\n utility.store_to_zipfile(target_filename, texts)\n\nsource_filename = \"/home/roger/tmp/riksdagens_protokoll.zip\"\ntarget_filename = \"/home/roger/tmp/riksdagens_protokoll_corpus.zip\"\npattern = \"prot_*.xml\"\nline_break='\\n'\npage_break='\\n#########\\n'\n\nextract_corpus(source_filename, target_filename, pattern, line_break=line_break, page_break=page_break)","sub_path":"westac/kblab/kblab_altoxml_text.py","file_name":"kblab_altoxml_text.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"310856262","text":"dependencies = ['segmentation_models_pytorch']\n\nimport torch\nfrom segmentation_models_pytorch import Unet, Linknet, FPN, PSPNet\n\n\ndef segmentation_model(pretrained=True, experiment_name='lung-segmentation', architecture_name='Unet', encoder='resnet18', encoder_weights='imagenet'):\n \"\"\"\n segmentation model\n pretrained (bool): load pretrained weights into the model\n experiment_name ('lung-segmentation', 'lesion-segmentation-a'): Experiment name\n architecture_name ('Unet', 'Linknet', 'FPN', 'PSPNet'): Architecture name\n encoder ('vgg11', 'vgg13', 'vgg19', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152', 'densenet121', 'densenet161', 'densenet169', 'densenet201', 'resnext50_32x4d', 'dpn68', 'dpn98', 'mobilenet_v2', 'xception', 'inceptionv4', 'efficientnet-b0', 'efficientnet-b1', 'efficientnet-b2', 'efficientnet-b3', 'efficientnet-b4', 'efficientnet-b5', 'efficientnet-b6'): Encoder\n encoder_weights (None, 'imagenet'): Encoder weights\n \"\"\"\n if architecture_name == 'Unet':\n architecture = Unet\n if 
+{"seq_id":"552660433","text":"import pygame\nfrom pygame.locals import *\nfrom random import randint\nimport time\npygame.init()\npygame.display.set_caption('pac-man')\necran = pygame.display.set_mode((1200, 800))\nfond = pygame.Surface((1200,800))\nfond.convert()\necran_rect = ecran.get_rect()\n\npacman = pygame.image.load('pacman.png') # load the image\npacman.set_colorkey((255,255,255)) # transparency\npacman.convert()\nposition_pacman=pacman.get_rect()\n\nfantome = pygame.image.load('fantome.png')\nfantome.set_colorkey((255,255,255))\nfantome.convert()\nposition_fantome=fantome.get_rect()\n\nfantome2 = pygame.image.load('fantome2.png')\nfantome2.set_colorkey((255,255,255))\nfantome2.convert()\nposition_fantome2 = fantome2.get_rect()\n\nfantome3 = pygame.image.load('fantome3.png')\nfantome3.set_colorkey((255,255,255))\nfantome3.convert()\nposition_fantome3 = fantome3.get_rect()\n\nfantome4 = pygame.image.load('fantome4.png')\nfantome4.set_colorkey((255,255,255))\nfantome4.convert()\nposition_fantome4 = fantome4.get_rect()\n\n\n\n\n\n# labyrinth setup\nCOTE = 40\nCOULEURS = { 'bleu' : (0,0,150), 'rouge' : (200,0,0)}\n\n\n\ndef fichier_to_matrice(fichier):\n    \"\"\"Opens a csv-style text file and stores its data\n    in a 2-dimensional list\"\"\"\n    f = open(fichier, 'r')\n    t = []\n    for ligne in f:\n        champs_texte = ligne.rstrip().split(',')\n        print(champs_texte) # for inspection\n        champs_entier = [int(c) for c in champs_texte]\n        print(champs_entier) # for inspection\n        t.append(champs_entier)\n    # as a one-liner:\n    # return [list(map(int, ligne.rstrip().split(','))) for ligne in f]\n    f.close()\n    return t\n    \ndef matrice_to_laby(matrice):\n    \"\"\"Converts a 2-dimensional list representing a labyrinth into a surface\"\"\"\n    # width (in bricks): the number of elements (columns) in the first row of the matrix\n    # height: the number of rows of the matrix\n    largeur, hauteur = len(matrice[0]), len(matrice)\n    laby = pygame.surface.Surface((largeur*COTE, hauteur*COTE))\n    laby.convert()\n    for x in range(largeur):\n        for y in range(hauteur):\n            if matrice[y][x] == 1:\n                laby.blit(MUR, (x*COTE, y*COTE))\n            else:\n                laby.blit(PASSAGE, (x*COTE, y*COTE))\n    return laby\n    \n    \n\n\n# set up the labyrinth's graphical environment\nmatrice = fichier_to_matrice('labyrinthe_pacman.csv')\nlargeur, hauteur = len(matrice[0]), len(matrice)\necran = pygame.display.set_mode((largeur*COTE, hauteur*COTE))\nMUR = pygame.surface.Surface((COTE, COTE))\nMUR.fill(COULEURS['rouge'])\nMUR.convert()\nPASSAGE = pygame.surface.Surface((COTE, COTE))\nPASSAGE.fill(COULEURS['bleu'])\nPASSAGE.convert()\nlabyrinthe = matrice_to_laby(matrice)\n
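\n# For instance, a CSV row \"1,0,0,1\" becomes the list [1, 0, 0, 1]: each 1 is drawn\n# as a red MUR tile and each 0 as a blue PASSAGE tile, COTE x COTE pixels apiece.\n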
\n\"\"\"\n# music\npygame.mixer.music.load('pacman.wav')\npygame.mixer.music.play()\n\n\"\"\"\n\nclock = pygame.time.Clock()\n# paste the labyrinth\necran.blit(labyrinthe,(0,0)) # blit the labyrinth onto the main screen\n\n\n\n# initial blit of pacman\npacmatrice = [4, 1]\nposition_pacman = position_pacman.move((pacmatrice[1]*40,pacmatrice[0]*40))\necran.blit(pacman,position_pacman) # blit pacman onto the main screen\n\n\n# initial blits of the ghosts\nfantomatrice = [11,2]\nposition_fantome = position_fantome.move((fantomatrice[1]*40,fantomatrice[0]*40))\necran.blit(fantome,position_fantome)\n\nfantomatrice2 = [8,20]\nposition_fantome2 = position_fantome2.move((fantomatrice2[1]*40,fantomatrice2[0]*40))\necran.blit(fantome2, position_fantome2)\n\nfantomatrice3 = [9,12]\nposition_fantome3 = position_fantome3.move((fantomatrice3[1]*40,fantomatrice3[0]*40))\necran.blit(fantome3, position_fantome3)\n\nfantomatrice4 = [1,10]\nposition_fantome4 = position_fantome4.move((fantomatrice4[1]*40,fantomatrice4[0]*40))\necran.blit(fantome4,position_fantome4)\n\n\n\n\ndef deplacer_pacman(n):\n    global position_pacman\n    i, j = pacmatrice\n    if n == 0: # right\n        j += 1\n    elif n == 1: # left\n        j += -1\n    elif n == 2: # up\n        i += -1\n    elif n == 3: # down\n        i += 1\n    if matrice[i][j] == 0:\n        print(i, j)\n        pacmatrice[0], pacmatrice[1] = i, j\n        position_pacman.topleft = (pacmatrice[1]*40, pacmatrice[0]*40)\n    \n\n\ndef deplacer_fantome(n):\n    global position_fantome\n    x, y = fantomatrice\n    if n == 0: # right\n        y += 1\n    elif n == 1: # left\n        y += -1\n    elif n == 2: # up\n        x += -1\n    elif n == 3: # down\n        x += 1\n    if matrice[x][y] == 0:\n        fantomatrice[0], fantomatrice[1] = x, y\n        position_fantome.topleft = (fantomatrice[1]*40, fantomatrice[0]*40)\n    return position_fantome\n\ndef deplacer_fantome2(n):\n    global position_fantome2\n    x, y = fantomatrice2\n    if n == 0: # right\n        y += 1\n    elif n == 1: # left\n        y += -1\n    elif n == 2: # up\n        x += -1\n    elif n == 3: # down\n        x += 1\n    if matrice[x][y] == 0:\n        fantomatrice2[0], fantomatrice2[1] = x, y\n        position_fantome2.topleft = (fantomatrice2[1]*40, fantomatrice2[0]*40)\n    return position_fantome2\n\n\n\n\ndef deplacer_fantome3(n):\n    global position_fantome3\n    x, y = fantomatrice3\n    if n == 0: # right\n        y += 1\n    elif n == 1: # left\n        y += -1\n    elif n == 2: # up\n        x += -1\n    elif n == 3: # down\n        x += 1\n    if matrice[x][y] == 0:\n        fantomatrice3[0], fantomatrice3[1] = x, y\n        position_fantome3.topleft = (fantomatrice3[1]*40, fantomatrice3[0]*40)\n    return position_fantome3\n\n\ndef deplacer_fantome4(n):\n    global position_fantome4\n    x, y = fantomatrice4\n    if n == 0: # right\n        y += 1\n    elif n == 1: # left\n        y += -1\n    elif n == 2: # up\n        x += -1\n    elif n == 3: # down\n        x += 1\n    if matrice[x][y] == 0:\n        fantomatrice4[0], fantomatrice4[1] = x, y\n        position_fantome4.topleft = (fantomatrice4[1]*40, fantomatrice4[0]*40)\n    return position_fantome4\n    \n    \n    \ndef perte_de_vie():\n    \"\"\"checks whether the player has lost a life\"\"\"\n    global life\n    global pacmatrice\n    if position_pacman == position_fantome or position_pacman == position_fantome2 or position_pacman == position_fantome3 or position_pacman == position_fantome4:\n        life -= 1\n        pacmatrice = [4, 4]\n    if life == -1:\n        pygame.quit()\n    \n    \ndef afficher_vie():\n    font = pygame.font.SysFont('comicsansms',50)\n    afficher_vies = font.render(str(life), 1, (255, 255, 0))\n    ecran.blit(afficher_vies,(0,0))\n\n\nlife = 3\n# Event loop\ncontinuer = 1\n\npygame.display.flip()\n
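# The four deplacer_fantome* functions above differ only in which grid/rect pair\n# they update; a single parameterized helper would be equivalent (sketch, with a\n# hypothetical name):\n#\n# def deplacer(n, cellule, rect):\n#     dx, dy = [(0, 1), (0, -1), (-1, 0), (1, 0)][n]\n#     x, y = cellule[0] + dx, cellule[1] + dy\n#     if matrice[x][y] == 0:\n#         cellule[0], cellule[1] = x, y\n#         rect.topleft = (y*40, x*40)\n#\n# e.g. deplacer(randint(0, 3), fantomatrice2, position_fantome2)\n\n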
while continuer:\n    for event in pygame.event.get():\n        if event.type == QUIT:\n            pygame.display.quit() # allows quitting the game by clicking the X.\n            pygame.mixer.music.stop()\n        elif event.type == KEYDOWN:\n            if event.key == K_RIGHT: # move right\n                deplacer_pacman(0)\n                n = randint(0,3)\n                o = randint(0,3)\n                p = randint(0,3)\n                q = randint(0,3)\n                deplacer_fantome(n)\n                deplacer_fantome2(o)\n                deplacer_fantome3(p)\n                deplacer_fantome4(q)\n                perte_de_vie()\n                print(position_pacman.x, position_pacman.y)\n                ecran.blit(labyrinthe,(0,0))\n                ecran.blit(pacman,position_pacman)\n                ecran.blit(fantome,position_fantome)\n                ecran.blit(fantome2,position_fantome2)\n                ecran.blit(fantome3,position_fantome3)\n                ecran.blit(fantome4,position_fantome4)\n                afficher_vie()\n                pygame.display.flip()\n            elif event.key == K_LEFT: # move left\n                pacman3= pygame.transform.rotate(pacman,180)\n                deplacer_pacman(1)\n                n = randint(0,3)\n                o = randint(0,3)\n                p = randint(0,3)\n                q = randint(0,3)\n                deplacer_fantome(n)\n                deplacer_fantome2(o)\n                deplacer_fantome3(p)\n                deplacer_fantome4(q)\n                perte_de_vie()\n                print(position_pacman.x, position_pacman.y)\n                ecran.blit(labyrinthe,(0,0))\n                ecran.blit(pacman3,position_pacman)\n                ecran.blit(fantome,position_fantome)\n                ecran.blit(fantome2,position_fantome2)\n                ecran.blit(fantome3,position_fantome3)\n                ecran.blit(fantome4,position_fantome4)\n                afficher_vie()\n                pygame.display.flip()\n            elif event.key == K_UP: # move up\n                pacman2= pygame.transform.rotate(pacman,90)\n                deplacer_pacman(2)\n                n = randint(0,3)\n                o = randint(0,3)\n                p = randint(0,3)\n                q = randint(0,3)\n                deplacer_fantome(n)\n                deplacer_fantome2(o)\n                deplacer_fantome3(p)\n                deplacer_fantome4(q)\n                perte_de_vie()\n                print(position_pacman.x, position_pacman.y)\n                ecran.blit(labyrinthe,(0,0))\n                ecran.blit(pacman2,position_pacman)\n                ecran.blit(fantome,position_fantome)\n                ecran.blit(fantome2,position_fantome2)\n                ecran.blit(fantome3,position_fantome3)\n                ecran.blit(fantome4,position_fantome4)\n                afficher_vie()\n                pygame.display.flip()\n            elif event.key == K_DOWN: # move down\n                pacman4= pygame.transform.rotate(pacman,270)\n                deplacer_pacman(3)\n                n = randint(0,3)\n                o = randint(0,3)\n                p = randint(0,3)\n                q = randint(0,3)\n                deplacer_fantome(n)\n                deplacer_fantome2(o)\n                deplacer_fantome3(p)\n                deplacer_fantome4(q)\n                perte_de_vie()\n                print(position_pacman.x, position_pacman.y)\n                ecran.blit(labyrinthe,(0,0))\n                ecran.blit(pacman4,position_pacman)\n                ecran.blit(fantome,position_fantome)\n                ecran.blit(fantome2,position_fantome2)\n                ecran.blit(fantome3,position_fantome3)\n                ecran.blit(fantome4,position_fantome4)\n                afficher_vie()\n                pygame.display.flip()\n            elif event.key == K_PRINT: # press \"PrtScn\" to take a screenshot.\n                pygame.image.save(ecran,'screenshot_pacman.png')\n    \n    \n\n    \n\n\n","sub_path":"pacman_jeu.py","file_name":"pacman_jeu.py","file_ext":"py","file_size_in_byte":10527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"93446908","text":"import json\nimport random\n\nimport flask\nimport numpy as np\nfrom flask import request\n\napp = flask.Flask(__name__)\napp.config['DEBUG'] = True\n\nwith open('secondary_1/app/storage.json', mode='rt') as fp:\n    msgs = json.load(fp)\nids = [int(msg['id']) for msg in msgs['messages']]\n\n\ndef generate_error():\n    rand_num = random.randint(0, 20)\n    if rand_num % 3 == 0:\n        raise ValueError('This is just a random error to be raised')\n\n\ndef write2storage():\n    with open('secondary_1/app/storage.json', mode='wt') as fp:\n        json.dump(msgs, fp)\n\n\n@app.route('/api/append', 
methods=['POST'])\ndef append():\n try:\n data = json.loads(request.json)\n if int(data['id']) not in ids:\n msgs['messages'].append(data)\n ids.append(int(data['id']))\n write2storage()\n\n generate_error()\n return 'Successfully processed message!', 200\n except:\n return 'Not able to process message!', 400\n\n\ndef get_ordered_slice():\n ixs = np.argsort(ids)\n\n sliced = []\n for cnt, ix in enumerate(ixs):\n if int(msgs['messages'][ix]['id']) == cnt + 1:\n sliced.append(msgs['messages'][ix])\n else:\n break\n return sliced\n\n\n@app.route('/api/list_messages', methods=['GET'])\ndef list_messages():\n if not msgs['messages']:\n return 'No messages!'\n\n sliced = get_ordered_slice()\n return str(sliced)\n\n\n@app.route('/api/check_health', methods=['GET'])\ndef check_health():\n return 'Healthy'\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8081)\n","sub_path":"task-3/secondary_1/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"456786865","text":"from tkinter import *\nimport pygame\nfrom tkinter.messagebox import showinfo\nfrom tkinter import filedialog\nimport time\nimport mutagen.mp3\nfrom mutagen.mp3 import MP3\nimport tkinter.ttk as ttk\n\n\nroot = Tk()\n\nroot.title('player')\nicon=PhotoImage(file=r\"icons/MP3.png\")\nroot.iconphoto(False,icon)\nroot.geometry(\"1280x700\")\nroot.configure(bg='#191414')\n\n\npygame.mixer.init()\n\n# Defining Function to Get length and time information about current song\ndef song_time():\n \n # Current Position of song in seconds (Dividing by thousand as default is milliseconds)\n current_time = pygame.mixer.music.get_pos() / 1000 \n \n # Converting given time to SPECIFIC FORMAT (more formal way H:M:S here)\n formal_time = time.strftime('%M:%S', time.gmtime(current_time))\n \n # Now Finding Current Song\n song = playlist.get(ACTIVE) # Grab song title from playlist using ACTIVE that represents current song here\n song = f'E:/Python_project/Songs/{song}.mp3' # Adding extra removed stuffs of path of a song\n \n # Now Finding Length Of A song using Mutagen after getting current song as above\n song_in_mut = MP3(song) # Passing song in mutagen and loading it with module to find it's Length\n song_len = song_in_mut.info.length # This will return us the length of selected song in seconds\n \n # Now converting the time we got in seconds to M:S form\n song_length = time.strftime('%M:%S', time.gmtime(song_len)) \n \n # Output time and song length to show on screen using config\n status_bar.config(text=f\" Song Duration: {formal_time} / {song_length} \")\n # Now we want to do this every time our new song starts playing so calling this song_time in play\n\n # Now updating current_time of song every single second(1000 milliseconds) till it's Playing that is done by after\n # Basically like looping(i.e Calling function every single second till length of song)\n \n status_bar.after(1000, song_time)\n\n\n# Defining Remove A Song Function in Add Option in Main Menu\ndef remove_song(): # Removes a selected one\n \n # Removing the Highlighted Song (i.e. 
here so called ANCHORED SONG)\n playlist.delete(ANCHOR)\n # After deleting the song it must stop playing it so we stop the song here (if playing)\n pygame.mixer.music.stop()\n\n# Defining Remove Many Songs Function in Add Option in Main Menu \ndef remove_all_songs(): # Removes all\n \n # Passing All Songs(we selected before in playlist) at once to delete using range form (0, till END) \n playlist.delete(0,END)\n # Stop playing any song (if its playing) \n pygame.mixer.music.stop()\n\n# Defining Add A Song Function in Add Option in Main Menu \ndef add_song():\n \n # To Open files to select songs from any directory\n song = filedialog.askopenfilename(title=\"Select One Song\" , filetypes=((\"MP3 Files\", \"*.mp3\"), ))\n \n # Adding one other variable to give our songs whole path to it\n temp_song=song\n \n # To Remove Extra Stuffs Getting printed While Adding Song Name in Queue\n h=-1\n for i in range(len(song)):\n if(song[h]==\"/\"):\n song = song.replace(song[0:(h+1)], \"\")\n song = song.replace(\".mp3\", \"\")\n break\n else:\n h=h-1\n \n # Adding Song To playlist\n playlist.insert(END, song)\n\n# Defining Add Many Songs Function in Add Option in Main Menu \n\ndef add_many_songs():\n songs = filedialog.askopenfilenames(title=\"Select Many Songs\" , filetypes=((\"MP3 Files\", \"*.mp3\"), ))\n \n # Giving paths of all songs in tuple to a temporary variable so as to access the whole path of any song from anywhere\n temp_songs=songs\n \n \n # Assigning temporary variable to every song in songs \n for temp_song in temp_songs:\n temp_song=temp_song\n \n \n # As Add Many Songs Is just Repetiton Of What We Did In Add A Song, We will Do That Things in loop\n \n for song in songs:\n\n # To Remove Extra Stuffs Getting printed While Adding Song Name in Queue\n h=-1\n for i in range(len(song)):\n if(song[h]==\"/\"):\n song = song.replace(song[0:(h+1)], \"\")\n song = song.replace(\".mp3\", \"\")\n break\n else:\n h=h-1\n\n # Adding Song To playlist\n playlist.insert(END, song)\n \n# Defining Help Button's Function\n\ndef Help():\n # Showinginfo is a command to display written things on Screen inside tkinter.messagebox, whose syntax is (Label, Message to be shown)\n showinfo(\"MP3 PLAYER\", \"Contact ESS112_GROUP-1 For Doubts Related To This Code\") \n\n\n# Defining About Button's Function\n\ndef About():\n # Showinginfo is a command to display written things on Screen inside tkinter.messagebox, whose syntax is (Label, Message to be shown)\n showinfo(\"MP3 PLAYER\", \"MP3 PLAYER by ESS112_GROUP-1\")\n\n\n# Defining Volume Function to do it's work\n\n# To See The level Of Volume Stretch from below or MP3 Player to see volume there \n\n # pos here holds the value that where basically the volume slider is there\ndef Volume(pos):\n # Using this command we can increase volume from above to down \n # MAX value at Bottom is 1 and Above is 0\n pygame.mixer.music.set_volume(volume_slider.get()) \n \n# Given Below Part is used in play but is a part of Volume slider, so added here as comments\n# We here gave Curvol as it shows The Current Volume while we play any song after being loaded\n # Curvol shows Current volume here \n # curvol = pygame.mixer.music.get_volume()\n # volume_slider_label.config(text=curvol * 100) # Multiplied by 100 as volume by default is shown in floating points using pygame \n\n# Giving Works To Every Buttons \n\n # Defining Play Button\n\ndef Play():\n \n # To Load Selected Song\n song = playlist.get(ACTIVE)\n \n # Adding Extra Part Of Path Of Function As No Song will be played just by 
its name \n song = f'E:/Python_project/Songs/{song}.mp3'\n \n # Playing song with the help of pygame \n pygame.mixer.music.load(song)\n pygame.mixer.music.play(loops=0)\n\n # Calling song_time function in Play\n song_time()\n\n# We here gave Curvol as it shows The Current Volume while we play any song after being loaded\n # Curvol shows Current volume here \n curvol = pygame.mixer.music.get_volume()\n volume_slider_label.config(text=curvol * 100) # Multiplied by 100 as volume by default is shown in floating points using pygame \n volume_slider_label[\"bg\"]= \"red\" # Setting Red colour to background where it shows text(volume level)\n volume_slider_label[\"fg\"]= \"white\" # Setting white colour to text shown \n\n# Create Check Variable To Check Whether A Song Is Running Or Not\nglobal Check\nCheck = False\n\n # Defining Pause Button\ndef Pause(is_paused):\n \n # Using Global Variable Here So that Every Time We Pause Or Unpause A Song, The Value Of The \"Check\" Variable Changes, allowing us to work properly with our player\n global Check\n Check = is_paused \n \n # Pausing A Song \n if Check==False:\n # Used Direct Command with Mixer Module To Pause Song\n pygame.mixer.music.pause()\n # Changing \"Check\" Variable's value to True tell \"SONG IS PAUSED NOW\"\n Check = True\n \n # Unpausing A Song\n else:\n # Used Direct Command with Mixer Module To UnPause Song\n pygame.mixer.music.unpause()\n # Changing \"Check\" Variable's value to False tell \"SONG IS UNPAUSED NOW\"\n Check = False\n\n # Defining Forward Button\ndef Forward():\n \n# Converting Songs To Tuples here using curselection so to know which song is being played\n# Basically here Songs Are Numbered\n# Curselection is Current Selection To know which song is being played from given list of tuples of songs\n next_song = playlist.curselection()\n \n # Now Adding One To Current Song number from tuples to Select \"NEXT\" song from Tuple of songs(OR Order in which we selected the songs)\n next_song = next_song[0]+1\n \n \n \n # Getting The Song Corresponding To Number In Tuple\n song = playlist.get(next_song)\n \n # Adding Extra Part Of Path Of Function As No Song will be played just by its name \n song = f'E:/Python_project/Songs/{song}.mp3'\n \n # Now After Selecting The Next Song By Above Steps, We'll Play THE NEXT SONG\n \n # Playing song with the help of pygame \n pygame.mixer.music.load(song)\n pygame.mixer.music.play(loops=0)\n\n# Now to Move Selection Line(Showing Current Song) to Next Song in playlist by clearing it from Current song and Make it appear on Next Song\n \n # So, clearing bar From Current Song here.\n playlist.selection_clear(0, END)\n # Making Appear(Activating) Selection Line On Next Song After clearing it from current song\n playlist.activate(next_song) # This Will just move underline from current song to next song\n \n # Here, we did last = none means it says we are not highlighting more than one thing in list and just ending highlighting in one element only\n playlist.selection_set(next_song, last=None) # This will move highlighter to next song\n \n \n # Defining Back Button\ndef Back():\n \n# Not Commenting Back Part As it is just Reverse to what we did in Forward and process is simple\n\n previous_song = playlist.curselection()\n previous_song = previous_song[0]-1\n song = playlist.get(previous_song)\n song = f'E:/Python_project/Songs/{song}.mp3'\n pygame.mixer.music.load(song)\n pygame.mixer.music.play(loops=0)\n playlist.selection_clear(0, END)\n playlist.selection_set(previous_song, last=None)\n\n # 
Defining Stop Button\ndef Stop():\n    # Used Direct Command with Mixer Module To Stop Song\n    pygame.mixer.music.stop()\n    # Clearing Selection line from current song\n    playlist.selection_clear(ACTIVE)\n    \n    # Clear the status_bar by writing nothing into it, since no song plays after stop\n    status_bar.config(text=\" \")\n    \n#basic_cmd_frame = Frame(root).grid(row=1, column=0)\nmaster_frame = Frame(root)\nmaster_frame.pack(pady = 30) # pady means padding in y to make it look properly aligned and attractive (same for padx in the x direction, so not explained everywhere)\nmaster_frame['bg'] = 'white'\n\n\"\"\"button_frame=Frame(root,bg=\"black\")\nbutton_frame.grid(row=1, column=1)\n\"\"\"\nplaylist = Listbox(master_frame, bg=\"orange\", fg=\"White\", width=40, selectbackground='DarkGreen') # Putting our playlist in Master frame\nplaylist.grid(row=0, column=0)\n\nvolume_frame = LabelFrame(master_frame, text=\"Volume\")\nvolume_frame.grid(row=0, column=1, padx=30)\n\nforward_image = PhotoImage(file=r\"icons/resized/fast-forward-button.png\")\nback_image = PhotoImage(file=r\"icons/resized/rewind.png\")\nstop_image = PhotoImage(file=r\"icons/resized/stop-button.png\")\npause_image = PhotoImage(file=r\"icons/resized/pause_button.png\")\nplay_image = PhotoImage(file=r\"icons/resized/play-button.png\")\n\nback = Button(root, image=back_image,fg=\"black\", borderwidth=0, command=Back)\nforward = Button(root, image=forward_image,fg=\"black\", borderwidth=0, command=Forward)\nplay = Button(root, image=play_image,fg=\"black\", borderwidth=0, command=Play)\npause = Button(root, image=pause_image,fg=\"black\", borderwidth=0, command=lambda: Pause(Check))\nstop = Button(root, image=stop_image,fg=\"black\", borderwidth=0, command=Stop)\n\nback.grid(row=1, column=1, padx=(270,8))\nforward.grid(row=1, column=5, padx=8)\nplay.grid(row=1, column=2, padx=8)\npause.grid(row=1, column=3, padx=8)\nstop.grid(row=1, column=4, padx=8)\n\n#shuff_btn = Button(root, image=shuff_btn_img, borderwidth=0).grid(row=1, column=3, padx=40)\n#loop_btn = Button(root, image=loop_btn_img, borderwidth=0).grid(row=1, column=7, padx=40)\n\n\n\n\nroot.mainloop()\n","sub_path":"trial.py","file_name":"trial.py","file_ext":"py","file_size_in_byte":11675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"112459497","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#######################################################################\n#\n# This module supports logging\n#\n#######################################################################\n\n\nimport logging\nimport logging.config\nfrom cloghandler import ConcurrentRotatingFileHandler\n\nLOGGING_INI = 'logging.ini'\nlogging_configured = False\nKEYWORDS_FOR_LOGGING = ['id',\n                        'name',\n                        'direction',\n                        'lane_type',\n                        'type',\n                        'compass',\n                        'bearing',\n                        'lane_id',\n                        'city',\n                        'streets',\n                        'center_x',\n                        'center_y',\n                        'size',\n                        'crop_radius',\n                        'east',\n                        'west',\n                        'north',\n                        'south'\n                        ]\n\ndef init_logger(force=False):\n\n    global logging_configured\n\n    if not force and logging_configured:\n        return\n\n    logging.config.fileConfig(LOGGING_INI)\n    logging_configured = True\n    logger = get_logger()\n    logger.info(\"\\n\\n-------------------------------------------------------------------------------------------------\")\n    logger.info(\"Logging configured\\n\")\n\n\ndef get_logger():\n    \"\"\"\n    Get logger\n    :return: logger\n    \"\"\"\n    if not logging_configured:\n        init_logger()\n\n    return 
logging.getLogger()\n\n\ndef dictionary_to_log(d, keywords_for_logging=KEYWORDS_FOR_LOGGING):\n \"\"\"\n Prepare dictionary vital parameters for logging\n :param d:\n :param keywords_for_logging: list of dictionary keywords to log\n :return: string\n \"\"\"\n result = ''\n if d is not None:\n for key in keywords_for_logging:\n if key in d:\n try:\n if key == 'direction' or key == 'type':\n result = result + d[key] + ' '\n elif key == 'city':\n result = result + d['city'].split(',')[0] + ' '\n else:\n result = result + (\"%s=%s \" % (key, str(d[key])))\n except UnicodeEncodeError:\n continue\n\n return result\n","sub_path":"source_code/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"571986306","text":"import json\r\nfrom datetime import datetime\r\nfrom flask import Flask, render_template, jsonify, request\r\nfrom flask_cors import CORS, cross_origin\r\nimport psycopg2\r\n\r\napp = Flask(__name__)\r\n\r\n# CORS(app)\r\n\r\n\r\n@app.after_request\r\ndef after_request(response):\r\n header = response.headers\r\n header['Access-Control-Allow-Origin'] = '*'\r\n return response\r\n\r\n\r\ndef getConn():\r\n return psycopg2.connect(database=\"store_db\", user=\"postgres\", password=\"G0alie!5\", host=\"127.0.0.1\", port=\"5432\")\r\n\r\n\r\n@app.route(\"/getCustomers\")\r\ndef getCustomers():\r\n conn = getConn()\r\n cur = conn.cursor()\r\n\r\n cur.execute(\"SELECT * from customers\")\r\n rows = cur.fetchall()\r\n results = []\r\n for row in rows:\r\n results.append({\r\n 'id': row[0],\r\n 'name': row[1],\r\n 'address': row[2],\r\n 'phone': row[3],\r\n 'email': row[4],\r\n })\r\n\r\n conn.close()\r\n\r\n return jsonify(results)\r\n\r\n\r\n@app.route(\"/getCustomerSalesHistory\")\r\ndef getCustomerSalesHistory():\r\n customer = request.args.get(\"customer\")\r\n conn = getConn()\r\n cur = conn.cursor()\r\n\r\n cur.execute(\"SELECT s.id, s.total_sale, l.name, s.sale_date \"\r\n \"from sales s, ref_store_locations l \"\r\n \"where l.id = s.location_id \"\r\n \"and s.customer_id = {}\".format(customer))\r\n rows = cur.fetchall()\r\n results = []\r\n for row in rows:\r\n results.append({\r\n 'id': row[0],\r\n 'total_sale': float(row[1]),\r\n 'location': row[2],\r\n 'sale_date': row[3]\r\n })\r\n conn.close()\r\n\r\n return jsonify(results)\r\n\r\n\r\n@app.route(\"/getAllOrders\")\r\ndef getAllOrders():\r\n conn = getConn()\r\n cur = conn.cursor()\r\n\r\n cur.execute(\"SELECT s.id, c.name, s.total_sale, l.name, s.sale_date \"\r\n \"from sales s, ref_store_locations l, customers c \"\r\n \"where l.id = s.location_id \"\r\n \"and c.id = s.customer_id\")\r\n rows = cur.fetchall()\r\n results = []\r\n for row in rows:\r\n if row[4] is not None:\r\n results.append({\r\n 'id': row[0],\r\n 'name': row[1],\r\n 'total_sale': float(row[2]),\r\n 'location': row[3],\r\n 'sale_date': row[4]\r\n })\r\n conn.close()\r\n\r\n return jsonify(results)\r\n\r\n\r\n@app.route(\"/getAllItems\")\r\ndef getAllItems():\r\n conn = getConn()\r\n cur = conn.cursor()\r\n\r\n cur.execute(\"SELECT i.*, r.name from items i, ref_item_types r where r.id = i.type\")\r\n rows = cur.fetchall()\r\n results = []\r\n for row in rows:\r\n print(row)\r\n results.append({\r\n 'id': row[0],\r\n 'name': row[1],\r\n 'weight': float(row[2]),\r\n 'price': float(row[4]),\r\n 'description': row[5],\r\n 'type_desc': row[-1]\r\n })\r\n\r\n conn.close()\r\n\r\n return jsonify(results)\r\n\r\n\r\n@app.route('/addCustomer', 
methods=['POST'])\r\n@cross_origin()\r\ndef add_customer():\r\n    if request.method == 'POST':\r\n\r\n        body = json.loads(request.data)\r\n\r\n        if body is not False:\r\n            conn = getConn()\r\n            cur = conn.cursor()\r\n\r\n            cur.execute(\"insert into customers (name, address, phone, email) values ( \\'{}\\', \\'{}\\', \\'{}\\',\\'{}\\' )\"\r\n                        .format(body['name'], body['address'], body['phone'], body['email']))\r\n\r\n            conn.commit()\r\n            conn.close()\r\n\r\n    response = jsonify({'some': 'data'})\r\n    response.headers.add('Access-Control-Allow-Origin', '*')\r\n    return response\r\n\r\n\r\n@app.route('/addItem', methods=['POST'])\r\n@cross_origin()\r\ndef add_item():\r\n    if request.method == 'POST':\r\n\r\n        body = json.loads(request.data)\r\n\r\n        if body is not False:\r\n            conn = getConn()\r\n            cur = conn.cursor()\r\n\r\n            cur.execute(\"insert into items (name, weight, type, price, description) values ( \\'{}\\', {}, \\'{}\\', {},\\'{}\\' )\"\r\n                        .format(body['name'], body['weight'], body['type'], body['price'], body['description']))\r\n\r\n            conn.commit()\r\n            conn.close()\r\n\r\n    response = jsonify({'some': 'data'})\r\n    response.headers.add('Access-Control-Allow-Origin', '*')\r\n    return response\r\n\r\n\r\n@app.route('/addOrder', methods=['POST'])\r\n@cross_origin()\r\ndef add_order():\r\n    if request.method == 'POST':\r\n\r\n        body = json.loads(request.data)\r\n\r\n        if body is not False:\r\n            conn = getConn()\r\n            cur = conn.cursor()\r\n\r\n            cur.execute(\"insert into sales (customer_id, total_sale, location_id, sale_date) values ( {}, {}, {}, \\'{}\\' )\"\r\n                        .format(body['customer_id'], body['total_sale'], body['location_id'], datetime.now()))\r\n\r\n            conn.commit()\r\n            conn.close()\r\n\r\n    response = jsonify({'some': 'data'})\r\n    response.headers.add('Access-Control-Allow-Origin', '*')\r\n    return response\r\n\r\n\r\nif __name__ == '__main__':\r\n    app.run(debug=True)\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"281063360","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep  5 15:13:58 2019\n\n@author: ROHIT\n\"\"\"\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\ndef conc(x,y,z,hs,z0):\n\n    iy=0.88/(math.log((hs/z0),math.e)-1)\n    iz = 0.50 / (math.log((hs / z0), math.e) - 1)\n\n    sigmay=x*iy\n    sigmaz=x*iz\n\n    temp1=(Q/U)*(1/(2*math.pi*sigmay*sigmaz))\n    temp2=math.exp(-math.pow(y,2)/(2*(math.pow(sigmay,2))))\n    temp3=(math.exp((-math.pow((z-H),2))/(2*math.pow(sigmaz,2))) + math.exp((-math.pow((z+H),2)/(2*math.pow(sigmaz,2)))))\n    final=temp1*temp2*temp3\n\n    return final\n\n\n#iy=0.10\n#iz=0.05\n'''\nQ=2*math.pow(10,5)\nhs=120\ndeltah=10\nU=15\nz0=0.01\nH=hs+deltah\nsizex=2*H\narr=np.zeros((sizex,800))\ncolor=np.zeros((sizex,800),dtype='int32')\n\nfor i in range(1,sizex):\n    for j in range(1,800):\n        arr[i][j]=conc(j,0,i,hs,z0)\n\nl1=333397.0790295258566\nl2=33333.97079029525857\nl3=8333.492697573814642\nl4=244.1231743934536604\nl5=1.9436330637602476\nl6=0.6932814371117969\nl7=0.0555\nl8=0.005\n\n\nfor i in range(1,2*H):\n    for j in range(1,800):\n        # band the concentration between successive thresholds (l8 smallest ... l1 largest)\n        if arr[i][j]<l8:\n            color[i][j]=0\n        elif arr[i][j]>l8 and arr[i][j]<l7:\n            color[i][j]=1\n        elif arr[i][j]>l7 and arr[i][j]<l6:\n            color[i][j]=2\n        elif arr[i][j]>l6 and arr[i][j]<l5:\n            color[i][j]=3\n        elif arr[i][j]>l5 and arr[i][j]<l4:\n            color[i][j]=4\n        elif arr[i][j]>l4 and arr[i][j]<l3:\n            color[i][j]=5\n        elif arr[i][j]>l3 and arr[i][j]<l2:\n            color[i][j]=6\n        elif arr[i][j]>l2 and arr[i][j]<l1:\n            color[i][j]=7\n        else:\n            color[i][j]=8\n'''\n\ndef preprocess_target(target):\n    \"\"\"\n    segmented : str -> bool\n    bndbox['xmax'], bndbox['xmin'], bndbox['..'] : str -> int\n    object : this is always converted to a list\n    \"\"\"\n    output_target = target.copy()\n\n    output_target['annotation']['segmented'] = bool(int(output_target['annotation']['segmented']))\n    for k, v in 
output_target['annotation']['size'].items():\n output_target['annotation']['size'][k] = int(v)\n\n\n if type(output_target['annotation']['object']) is not list:\n # If this is a not a list, it contains a single object\n # that we put in a list\n output_target['annotation']['object'] = [output_target['annotation']['object']]\n\n objects = output_target['annotation']['object']\n\n for o in objects:\n for k, v in o['bndbox'].items():\n o['bndbox'][k] = int(v)\n for k in ['occluded', 'difficult', 'truncated']:\n o[k] = bool(int(o[k]))\n\n return output_target\n\ndef extract_class_to_tensor(target: dict):\n cls_list = [ classes.index(o['name']) for o in target['annotation']['object'] ]\n one_hot_list = [0]*20\n for i in cls_list:\n one_hot_list[i] = 1\n cls_tensor = torch.Tensor(one_hot_list)\n return cls_tensor\n\n\n\ndef extract_class_and_bndbox(target: dict,\n image_transform_params: dict):\n\n \"\"\"\n target['annotation'] = {'filename': '2008_000008.jpg',\n 'folder': 'VOC2012',\n 'object': [{'bndbox': {'xmax': 471,\n 'xmin': 53,\n 'ymax': 420,\n 'ymin': 87},\n 'difficult': False,\n 'name': 'horse',\n 'occluded': True,\n 'pose': 'Left',\n 'truncated': False},\n {'bndbox': {'xmax': 289,\n 'xmin': 158,\n 'ymax': 167,\n 'ymin': 44},\n 'difficult': False,\n 'name': 'person',\n 'occluded': False,\n 'pose': 'Unspecified',\n 'truncated': True}],\n 'segmented': False,\n 'size': {'depth': 3, 'height': 442, 'width': 500},\n 'source': {'annotation': 'PASCAL VOC2008',\n 'database': 'The VOC2008 Database',\n 'image': 'flickr'}}\n example output :\n [{'bndbox': {'xmax': , 'xmin': , 'ymax':, 'ymin': }, 'class':5}, {'bndbox': {..}, 'class': ...}]\n \"\"\"\n\n return [{'bndbox': transform_bbox(o['bndbox'],\n {'width' : target['annotation']['size']['width'],\n 'height': target['annotation']['size']['height']},\n image_transform_params),\n 'class': classes.index(o['name'])}\n for o in target['annotation']['object']]\n\ndef transform_bbox(bbox: dict, input_image_size: dict, image_transform_params:dict):\n \"\"\"\n bbox : {'xmin': int, 'xmax': int, 'ymax': int, 'ymin': int}\n input_image_size : {'width': int, 'height': int}\n resize_image : one of ['none', 'shrink', 'crop']\n output_image_size : {'width': int, 'height': int}\n \"\"\"\n\n # The encoding is the center of the bounding box\n # and its width/height.\n # All these coordinates are relative to the output_image_size\n out_bbox = {\"cx\": 0.0, \"cy\": 0.0, \"width\": 0.0, \"height\": 0.0}\n\n image_mode = image_transform_params['image_mode']\n if image_mode == 'none':\n out_bbox[\"cx\"] = 0.5 * (bbox['xmin'] + bbox['xmax']) / input_image_size['width']\n out_bbox[\"cy\"] = 0.5 * (bbox['ymin'] + bbox['ymax']) / input_image_size['height']\n out_bbox[\"width\"] = float(bbox[\"xmax\"] - bbox[\"xmin\"]) / input_image_size[\"width\"]\n out_bbox[\"height\"] = float(bbox[\"ymax\"] - bbox[\"ymin\"]) / input_image_size[\"height\"]\n\n elif(image_mode == 'shrink'):\n output_image_size = image_transform_params['output_image_size']\n scale_width = float(output_image_size['width']) / input_image_size['width']\n scale_height = float(output_image_size['height']) / input_image_size['height']\n out_bbox[\"cx\"] = scale_width * 0.5 * (bbox['xmin'] + bbox['xmax']) / output_image_size['width']\n out_bbox[\"cy\"] = scale_height * 0.5 * (bbox['ymin'] + bbox['ymax']) / output_image_size['height']\n out_bbox[\"width\"] = scale_width * float(bbox[\"xmax\"] - bbox[\"xmin\"]) / output_image_size[\"width\"]\n out_bbox[\"height\"] = scale_height * float(bbox[\"ymax\"] - 
bbox[\"ymin\"]) / output_image_size[\"height\"]\n\n elif(image_mode == 'crop'):\n output_image_size = image_transform_params['output_image_size']\n offset_width = int(round((input_image_size['width'] - output_image_size['width']) / 2.))\n offset_height = int(round((input_image_size['height'] - output_image_size['height']) / 2.))\n\n cropped_bbox = {\"xmin\": 0.0, \"xmax\": 0.0, \"ymin\": 0.0, \"ymax\": 0.0}\n for sfx in ['min', 'max']:\n cropped_bbox['x%s'%sfx] = min(max(bbox['x%s'%sfx] - offset_width, 0), output_image_size['width'])\n cropped_bbox['y%s'%sfx] = min(max(bbox['y%s'%sfx] - offset_height, 0), output_image_size['height'])\n out_bbox[\"cx\"] = 0.5 * (cropped_bbox['xmin'] + cropped_bbox['xmax']) / output_image_size['width']\n out_bbox[\"cy\"] = 0.5 * (cropped_bbox['ymin'] + cropped_bbox['ymax']) / output_image_size['height']\n out_bbox[\"width\"] = float(cropped_bbox[\"xmax\"] - cropped_bbox[\"xmin\"]) / output_image_size[\"width\"]\n out_bbox[\"height\"] = float(cropped_bbox[\"ymax\"] - cropped_bbox[\"ymin\"]) / output_image_size[\"height\"]\n\n else:\n raise ValueError('invalid image_mode for transform_bbox, got \"{}\"'.format(image_mode))\n return out_bbox\n\n\ndef filter_largest(objects: list):\n \"\"\"\n This builds and return a function which acts the way depicted below.\n output_image_size and mode is used to adapt the bounding box coordinates\n example input :\n objects : [{'bndbox': {'cx': ..., 'cy': ....,\n width': ..., 'height': ...},\n 'class': 5},\n {'bndbox': {'cx': ..., 'cy': ....,\n 'width': ..., 'height': ...},\n 'class': 4},\n ...]\n example output (a single object) :\n {'bndbox': {}, 'class': 5}\n \"\"\"\n\n largest_object_idx = None\n largest_size = None\n for ko, o in enumerate(objects):\n bbox = o['bndbox']\n o_size = bbox['width'] * bbox['height']\n if not largest_size or o_size > largest_size:\n largest_object_idx = ko\n largest_size = o_size\n\n return objects[largest_object_idx]\n\n\ndef target_to_tensor(obj: dict):\n \"\"\"\n Input :\n obj :{'bndbox': {}, 'class': 5}\n Output : two tensors,\n - the first with [cx, cy, width, height]\n - the second with [class]\n \"\"\"\n label = obj['class']\n bndbox = obj['bndbox']\n\n return {'bboxes': torch.Tensor([bndbox['cx'], bndbox['cy'], bndbox['width'], bndbox['height']]),\n 'labels': torch.LongTensor([label])}\n\n\ndef cell_idx_of_center(coordinates, num_cells: int):\n return math.floor(coordinates[0] * num_cells), math.floor(coordinates[1] * num_cells)\n\ndef targets_to_grid_cell_tensor(objects: list, num_cells: int):\n \"\"\"\n This builds and return a function which acts the way depicted below.\n output_image_size and mode is used to adapt the bounding box coordinates\n Example:\n Input\n objects : [{'bndbox': {}, 'class': 5}, {'bndbox': {}, 'class': 4}, ...]\n num_cells : 6\n Output : three tensors\n 'bboxes' : (6, 6, 4) with (cx, cy, width ,height)\n 'has_obj': (6, 6) whether the cell (i,j) contains the center of an object\n 'labels' : (6, 6) labels\n Every cell is affected at most one object; If multiple objects share the same cell\n only one is preserved\n \"\"\"\n bboxes = torch.zeros((4, num_cells, num_cells),dtype=torch.float)\n has_obj = torch.zeros((num_cells, num_cells), dtype=torch.int)\n labels = torch.zeros((num_cells, num_cells),dtype=torch.int)\n for ko, o in enumerate(objects):\n bndbox = o['bndbox']\n cx, cy, width, height = bndbox['cx'], bndbox['cy'], bndbox['width'], bndbox['height']\n cj, ci = cell_idx_of_center((cx, cy), num_cells)\n bboxes[:, ci, cj] = torch.Tensor([cx*num_cells - cj, 
cy*num_cells - ci, width, height])\n has_obj[ci, cj] = 1\n labels[ci, cj] = o['class']\n return {'bboxes': bboxes, 'has_obj': has_obj, 'labels': labels}\n\ndef check_key(d, key, valid_values):\n if not key in d:\n raise KeyError('Missing key {} in dictionnary {}'.format(key, d))\n if not d[key] in valid_values:\n raise ValueError(\"Key {}: got \\\"{}\\\" , expected one of {}\".format(key, d[key], valid_values))\n\ndef validate_image_transform_params(image_transform_params: dict):\n \"\"\"\n {'image_mode'='none'}\n {'image_mode'='shrink', output_image_size={'width':.., 'height': ..}}\n {'image_mode'='crop' , output_image_size={'width':.., 'height': ..}}\n \"\"\"\n check_key(image_transform_params, 'image_mode', ['none', 'shrink', 'crop'])\n\n if(image_transform_params['image_mode'] == 'none'):\n return\n else:\n assert('output_image_size' in image_transform_params)\n assert(type(image_transform_params['output_image_size']) is dict)\n assert('width' in image_transform_params['output_image_size'])\n assert('height' in image_transform_params['output_image_size'])\n\ndef make_image_transform(image_transform_params: dict,\n transform: object):\n \"\"\"\n image_transform_params :\n {'image_mode'='none'}\n {'image_mode'='shrink', output_image_size={'width':.., 'height': ..}}\n {'image_mode'='crop' , output_image_size={'width':.., 'height': ..}}\n transform : a torchvision.transforms type of object\n \"\"\"\n validate_image_transform_params(image_transform_params)\n\n resize_image = image_transform_params['image_mode']\n if resize_image == 'none':\n preprocess_image = None\n elif resize_image == 'shrink':\n preprocess_image = transforms.Resize((image_transform_params['output_image_size']['width'],\n image_transform_params['output_image_size']['height']))\n elif resize_image == 'crop':\n preprocess_image = transforms.CenterCrop((image_transform_params['output_image_size']['width'],\n image_transform_params['output_image_size']['height']))\n\n if preprocess_image is not None:\n if transform is not None:\n image_transform = transforms.Compose([preprocess_image, transform])\n else:\n image_transform = preprocess_image\n else:\n image_transform = transform\n\n return image_transform\n\n\ndef validate_target_transforms_params(target_transform_params: dict):\n \"\"\"\n {'target_mode'='orig'}\n {'target_mode'='preprocessed'}\n {'target_mode'='largest_bbox', 'image_transform_params': dict}\n {'target_mode'='all_bbox' , 'image_transform_params': dict, 'num_cells': int}\n \"\"\"\n check_key(target_transform_params, 'target_mode', ['orig', 'preprocessed', 'largest_bbox', 'all_bbox', 'custom', 'only_cls'])\n\n if(target_transform_params['target_mode'] in ['orig', 'preprocessed']):\n return\n else:\n assert('image_transform_params' in target_transform_params)\n assert(type(target_transform_params['image_transform_params']) is dict)\n validate_image_transform_params(target_transform_params['image_transform_params'])\n if(target_transform_params['target_mode'] == 'all_bbox'):\n assert('num_cells' in target_transform_params)\n\n\ndef make_target_transform(target_transform_params: dict):\n \"\"\"\n target_mode :\n orig : keeps the original unaltered targets\n preprocessed : perform some preprocessing on the targets, see data.preprocess_target\n all_bbox : keeps all the bounding boxes and convert them into \"grid cell\" tensors\n largest_bbox : outputs a tensor with the largest bbox (4 numbers)\n see also help(validate_target_transforms_params)\n \"\"\"\n validate_target_transforms_params(target_transform_params)\n\n 
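# Illustrative usage (the parameter values below are examples, not from this file):\n # with {'target_mode': 'all_bbox', 'num_cells': 6,\n # 'image_transform_params': {'image_mode': 'shrink',\n # 'output_image_size': {'width': 224, 'height': 224}}}\n # the callable returned below maps one raw VOC annotation dict to the\n # {'bboxes': (4, 6, 6), 'has_obj': (6, 6), 'labels': (6, 6)} tensors\n # produced by targets_to_grid_cell_tensor.\n 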
target_mode = target_transform_params['target_mode']\n if target_mode == 'orig':\n return None\n elif target_mode == 'preprocessed':\n t_transform = lambda target: preprocess_target(target)\n elif target_mode == 'only_cls':\n t_transform = lambda target: extract_class_to_tensor(preprocess_target(target))\n else:\n image_transform_params = target_transform_params['image_transform_params']\n get_bbox = lambda target: extract_class_and_bndbox(preprocess_target(target), image_transform_params)\n if target_mode == 'largest_bbox':\n # Modify the get_bbox to keep only the largest bounding box\n t_transform = lambda target: target_to_tensor(filter_largest(get_bbox(target)))\n elif target_mode == 'custom':\n t_transform = lambda target: [target_to_tensor(i) for i in get_bbox(target)]\n else:\n t_transform = lambda target: targets_to_grid_cell_tensor(get_bbox(target), target_transform_params['num_cells'])\n \n return t_transform\n\n\n\ndef make_trainval_dataset(image_transform_params: dict,\n transform: object,\n target_transform_params: dict,\n download: bool):\n dataset_dir = './voc'\n image_transform = make_image_transform(image_transform_params, transform)\n target_transform = make_target_transform(target_transform_params)\n\n dataset_train = VOC.VOCDetection(root=dataset_dir, image_set='train',\n transform = image_transform,\n target_transform = target_transform,\n download=download)\n dataset_val = VOC.VOCDetection(root=dataset_dir, image_set='val' ,\n transform = image_transform,\n target_transform = target_transform,\n download=download)\n return dataset_train, dataset_val","sub_path":"torch-CAM/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":15426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"227576479","text":"import panduan\r\n\r\nimport zimua\r\n\r\n# this is a class\r\n# you need to instantiate it\r\n# aa = zimua.Game()\r\n# aa.a()\r\ngame =input(\"请选择游戏\\n数字游戏()\\n字母游戏()\\n输入1或2:\")\r\nif game == \"1\":\r\n # instantiate the object\r\n gun_num = panduan.GameGum()\r\n gun_num.num_member(0,0)\r\nelif game == \"2\":\r\n gun_nu = zimua.Game()\r\n gun_nu.a()\r\n gun_nu.l()\r\nelse:\r\n print(\"输入错误\")\r\n","sub_path":"User/day1/ZIMUDEMO.py","file_name":"ZIMUDEMO.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"393958385","text":"\nimport os\nfrom orun.extjs import *\nfrom orun.servers import cp\nimport cherrypy\n\nTHEME_MODEL = 'classic'\nTHEME = 'gray'\nCHART_THEME_MODEL = 'classic/classic' if THEME_MODEL == 'classic' else 'modern/modern-' + THEME\nBASE_URL = ''\nENABLE_CHARTS=False\n\n@cherrypy.expose\nclass ExtJS:\n _cp_config = {\n 'tools.staticdir.on': True,\n 'tools.staticdir.dir': os.path.join(os.path.dirname(__file__), 'static','ext-6.2.0'),\n }\n\nclass ExtApplication(cp.Application):\n def __init__(self, title=''):\n super(ExtApplication, self).__init__(title)\n self.ext_620 = ExtJS()\n \n @cherrypy.expose\n def index(self, *args, **kwargs):\n f = open(os.path.join(os.path.dirname(__file__), 'app.html')).read()\n self.main()\n \n CHART_HTML_CODE = '''\n \n'''.format(base_url=BASE_URL,\n theme_model=THEME_MODEL,\n chart_theme_model=CHART_THEME_MODEL)\n EXTJS_PACKAGES = '' + CHART_HTML_CODE if ENABLE_CHARTS else ''\n \n return f.format(title=self.title,\n base_url=BASE_URL,\n theme=THEME,\n theme_model=THEME_MODEL,\n script=str(js.js_manager),\n packages=EXTJS_PACKAGES)\n \n @cherrypy.expose\n def ajax_callback(self, *args, **kwargs):\n fn = kwargs.pop('fn')\n if fn:\n fn = 
js.live_methods[int(fn)].func\n fn(*args, **kwargs)\n return str(js.js_manager)\n \n @cherrypy.expose\n @cp.cherrypy.tools.json_out()\n def ajax_func_callback(self, *args, **kwargs):\n fn = kwargs.pop('fn')\n if fn:\n fn = js.live_methods[int(fn)].func\n res = fn(*args, **kwargs)\n return {\n 'data': res\n }\n else:\n return {\n 'data': None\n }\n \nif __name__ == '__main__':\n app = ExtApplication('Orun (ExtJS Application)')\n app.run()\n","sub_path":"orun/extjs/cp.py","file_name":"cp.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"232349788","text":"# -*- coding: utf-8 -*-\n\"\"\"\n Main\n author: Code Islet\n since: 0.1.0\n\"\"\"\nimport sys\nimport atexit\nfrom common.logger import log\nfrom common.config import get_config, get_opts\nfrom PyQt5.QtWidgets import *\nfrom lib.window import Window\nfrom application import Application\n\n\nclass MainGui:\n\n def __init__(self):\n self._API_CONFIG = get_config('1q_api')\n self._window = Window(get_config('node_name'))\n self._app = Application(self._window)\n\n def exec(self, start=False):\n \"\"\" execution\n :return: void\n \"\"\"\n try:\n self._window.set_real_name(self._API_CONFIG['target']['real_name'])\n self._window.set_symbol_name(self._API_CONFIG['target']['symbol'])\n self._window.append_log('[run] Initialize main application.')\n self._window.set_quit_button_listener(self.quit)\n self._window.set_run_button_listener(self.run)\n self._window.open_window()\n if start is True:\n self.run()\n except Exception as _e:\n self._window.append_log('[exec] ERROR: {}'.format(str(_e)))\n\n def stop(self):\n \"\"\" stop\n :return: void\n \"\"\"\n try:\n self._window.append_log('[stop] service stopping..')\n self._window.set_run_button_text('STOPPING..')\n self._window.delete_run_button_listener()\n self._app.disconnect()\n self._window.append_log('[stop] service stop complete.')\n except Exception as _e:\n self._window.append_log('[stop] ERROR: {}'.format(str(_e)))\n finally:\n self._window.set_run_button_text('START')\n self._window.set_run_button_listener(self.run)\n\n def quit(self):\n \"\"\" quit application\n :return: void\n \"\"\"\n self.stop()\n self._window.close_window()\n sys.exit()\n\n def run(self):\n \"\"\" start\n :return: void\n \"\"\"\n try:\n self._window.set_run_button_text('STARTING..')\n self._window.delete_run_button_listener()\n self._app.connect()\n self._window.set_run_button_text('STOP')\n self._window.set_run_button_listener(self.stop)\n self._app.listen_real()\n except Exception as _e:\n self._window.append_log('[run] ERROR: {}'.format(str(_e)))\n self.stop()\n\n\napp = QApplication(sys.argv)\nopts = get_opts()\nauto_start = False\nif 's' in opts:\n auto_start = True\nmain_gui = MainGui()\n\n\ndef main():\n try:\n main_gui.exec(auto_start)\n sys.exit(app.exec_())\n except Exception as _e:\n log.error(str(_e))\n\n\ndef at_exit():\n if main_gui is not None:\n log.info('[exit] quit application.')\n main_gui.quit()\n\n\natexit.register(at_exit)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"536091476","text":"import unittest\n\nfrom wufflib.collections import MultiDict\n\n\nclass TestCollections(unittest.TestCase):\n def test_multi_dict(self):\n multi_dict = MultiDict({'a': [1, 2, 3]})\n\n self.assertEqual(1, multi_dict.get('a'))\n 
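# get() returns the first stored value, while getlist() returns them all —\n # the assertion above and the one below illustrate the two access paths.\n 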
self.assertEqual([1, 2, 3], multi_dict.getlist('a'))\n","sub_path":"library/wufflib/collections_test.py","file_name":"collections_test.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"18696568","text":"# Author: Alexander Ponamarev (alex.ponamaryov@gmail.com) 04/30/2017\nimport tensorflow as tf\nfrom .NetTemplate import NetTemplate\n\nclass ClassificationTemplate(NetTemplate):\n def __init__(self, X_placeholders, Y_placeholders, n_classes, default_activation='elu',\n dtype=tf.float32, probability_density = None, trainset_mean=None, gpu=\"/gpu:0\"):\n\n self._gpu = gpu\n self._img_mean = trainset_mean\n\n tf.add_to_collection('inputs', X_placeholders)\n tf.add_to_collection('inputs', Y_placeholders)\n\n with tf.name_scope('inputs'):\n self.X = X_placeholders\n tf.summary.image(\"imgs\", self.X, max_outputs=6)\n self.labels = Y_placeholders\n\n X_float = tf.divide(self.X, 255.0)\n if self._img_mean is not None:\n self.X_norm = tf.subtract(X_float, self._img_mean, name=\"X_norm\")\n else:\n shape = X_float.get_shape().as_list()\n shape[0] = -1\n X_flat = tf.contrib.layers.flatten(X_float)\n self.X_norm = tf.reshape(tf.subtract(X_flat, tf.reduce_mean(X_flat, axis=1, keep_dims=True, name=\"X_norm\")),\n shape)\n tf.summary.image(\"norm_imgs\", self.X_norm, max_outputs=6)\n\n\n self._N_CLASSES = n_classes\n\n if probability_density is None:\n self.pdf=None\n else:\n self.pdf = tf.constant(probability_density, shape=[self._N_CLASSES],\n dtype=dtype, name='probability_density_function')\n\n NetTemplate.__init__(self,\n dropout_keep_rate=tf.placeholder(dtype=tf.float32, shape=[], name=\"dropout_keep_prob\"),\n training_mode_flag=tf.placeholder(dtype=tf.bool, shape=[], name=\"is_training_phase\"),\n default_activation=default_activation,\n dtype=dtype)\n\n self._assemble()\n\n def _assemble(self):\n self._define_net()\n print(\"Autoencoder was successfully created.\")\n self._define_loss()\n print(\"Loss definition was successfully created.\")\n self._define_optimization_method()\n print(\"Optimization function was initialized.\")\n self._define_prediction()\n self._define_accuracy()\n print(\"{} is ready for training!\".format(type(self).__name__))\n\n\n def _define_loss(self):\n\n # Note: When is_training is True the moving_mean and moving_variance need to be updated, by default the update_ops are placed in tf.GraphKeys.UPDATE_OPS so they need to be added as a dependency to the train_op, example:\n # update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n # if update_ops:\n # updates = tf.group(*update_ops)\n # total_loss = control_flow_ops.with_dependencies([updates], total_loss)\n # Reference: http://ruishu.io/2016/12/27/batchnorm/\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n\n with tf.device(self._gpu):\n with tf.control_dependencies(update_ops):\n # Ensures that we execute the update_ops before performing the train_step\n with tf.name_scope(\"cross_entropy_loss\"):\n if self.pdf is None:\n self.total_loss = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.labels,\n logits=self.feature_map)\n )\n else:\n P_of_x = tf.nn.softmax(logits=self.feature_map)\n P_of_x_given_PDF = tf.divide(P_of_x, self.pdf)\n\n self.total_loss = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.labels,\n logits=P_of_x_given_PDF)\n )\n\n tf.summary.scalar(\"total_loss\", self.total_loss)\n\n def _define_prediction(self):\n assert self.feature_map is 
not None, \"Error: Feature map wasn't defined.\"\n with tf.device(self._gpu):\n with tf.name_scope(\"class_prediction\"):\n self.probability_op = tf.nn.softmax(self.feature_map, name=\"probability\")\n self.predict_class_op = tf.arg_max(self.probability_op, 1, name=\"label\")\n tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, self.predict_class_op)\n\n def _define_accuracy(self):\n with tf.device(self._gpu):\n self.accuracy_op = tf.reduce_mean(\n tf.cast(tf.equal(self.predict_class_op, self.labels, name=\"predict_accuracy\"),\n dtype=tf.float32)\n )\n tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, self.accuracy_op)\n\n tf.summary.scalar(\"accuracy\", self.accuracy_op)\n\n def eval(self, X_batch, Y_batch):\n sess = tf.get_default_session()\n accuracy = sess.run(self.accuracy_op, feed_dict={self.X: X_batch,\n self.labels: Y_batch,\n self.is_training_mode: False,\n self.dropout_keep_rate: 1.0})\n return accuracy\n\n def fit(self, X_batch, Y_batch, dropout_keep_prob=0.75):\n sess = tf.get_default_session()\n _, accuracy = sess.run([self.optimization_op, self.accuracy_op],\n feed_dict={self.X: X_batch,\n self.labels: Y_batch,\n self.is_training_mode: True,\n self.dropout_keep_rate: dropout_keep_prob}\n )\n return accuracy\n\n def infer(self, X_batch):\n sess = tf.get_default_session()\n probability, predict_class = sess.run([self.probability_op, self.predict_class_op],\n feed_dict={self.X: X_batch,\n self.is_training_mode: False,\n self.dropout_keep_rate: 1.0})\n return predict_class, probability\n\n\n\n","sub_path":"src/nets/ClassificationTemplate.py","file_name":"ClassificationTemplate.py","file_ext":"py","file_size_in_byte":6350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"459784651","text":"# 1. reverse the string \ns = input(\"Enter string \")\n# print(s[::-1])\n\n# print(''.join(reversed(s)))\n\nrev = \"\"\ni = len(s) - 1 # 5\nwhile i>=0: \n last_char = s[i]\n i = i-1\n rev = rev+last_char\nprint(rev)\n\n","sub_path":"String/eighteen.py","file_name":"eighteen.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"543291173","text":"class Email:\n def __init__(self, sender, receiver, content, is_sent=False):\n self.sender = sender\n self.receiver = receiver\n self.content = content\n self.is_sent = is_sent\n\n def sent(self):\n self.is_sent = True\n\n def get_info(self):\n return f'{self.sender} says to {self.receiver}: {self.content}. Sent: {self.is_sent}'\n\n\nmailbox = []\n\nwhile True:\n email = input()\n if email == 'Stop':\n break\n\n sender, receiver, content = email.split(' ')\n current_email = Email(sender, receiver, content)\n mailbox.append(current_email)\n\nsent_emails = list(map(int, input().split(', ')))\n\nfor index in sent_emails:\n mailbox[index].sent()\n\nfor email in mailbox:\n print(email.get_info())\n","sub_path":"06-01-Classes-and-Objects-Lab/03-Email.py","file_name":"03-Email.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"266870225","text":"\"\"\"Bootstrap for running a Django app under Google App Engine.\n\nThe site-specific code is all in other files: settings.py, urls.py,\nmodels.py, views.py. 
And in fact, only 'settings' is referenced here\ndirectly -- everything else is controlled from there.\n\n\"\"\"\n\n# Standard Python imports.\nimport os\nimport sys\nimport logging\nimport __builtin__\nimport webapp2\nimport views\nimport forum\nfrom attachment import ImageHandler\n\n# Enable info logging by the app (this is separate from appserver's\n# logging).\nlogging.getLogger().setLevel(logging.INFO)\n\n# Create a Django application for WSGI.\napp = webapp2.WSGIApplication( [\n ('/', views.Index ),\n ('/download', views.Download ),\n ('/upload', views.Upload ),\n webapp2.Route(r'/a/i/', handler=ImageHandler,\n name='showimage'),\n webapp2.Route(r'/a/i/upload', handler=ImageHandler,\n name='uploadimage'),\n webapp2.Route(r'/f/b/add', handler=forum.views.AddBoard,\n name='addboard'),\n webapp2.Route(r'/f/b/', handler=forum.views.ListBoard,\n name='listboard'),\n webapp2.Route(r'/f/p/add', handler=forum.views.AddPost,\n name='addpost'),\n webapp2.Route(r'/f/p/update', handler=forum.views.UpdatePost,\n name='updatepost'),\n webapp2.Route(r'/f/p/', handler=forum.views.ListPost,\n name='listpost'),\n ], debug=True )\n\n","sub_path":"RedFoxSpamDB/bootstrap.py","file_name":"bootstrap.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"481856658","text":"import nltk\nrules = \"\"\"\n# Grammar\nS -> NP VP | Aux NP VP | VP\nNP -> Pron | PN | Det Nom\nNom -> N | Nom N | Nom PP\nVP -> V | V NP | V NP PP | V PP | VP PP\nPP -> P NP\n# Lexical rules (selection):\nDet -> that | this | the | a\nN -> book | flight | meal | money\nV -> book | include | prefer\nPron -> I | she | me\nPN -> Houston | Orlando | NWA\nAux -> does\nP -> from | to | on | near | through\n\"\"\"\n\n\ngrammar = nltk.CFG.fromstring(rules)\n\ns = {'lhs':grammar.start(),'rhs':(),'dot':0,'pos':(0,0)}\ns_2 = {'a':1,'b':(1,2)}\nt = {'a':1,'b':(1,2)}\nprint(s_2==t)\n\ndef is_pos(sym):\n for prod in grammar.productions(lhs=sym):\n if len(prod.rhs()) == 1 and isinstance(prod.rhs()[0],str):\n return True\n return False\n\ndef early_parse(words):\n\n\n chart = [[] for i in range(len(words) +1)]\n\n def is_complete(state):\n return state['dot'] == len(state['rhs'])\n def enqueue(state,column):\n if state not in column:\n column.append(state)\n def predictor(state):\n for prod in grammar.productions(lhs = state['rhs'][state['dot']]):\n enqueue({'lhs':prod.lhs(),\n 'rhs':prod.rhs(),\n 'dot':0,\n 'pos':state(['pos'][1],state['pos'][1])},\n chart[state['pos'][1]])\n def scanner(state):\n pass\n def completer(state):\n pass\n\n enqueue({'lhs':nltk.grammar.Nonterminal('~'),\n 'rhs':(grammar.start(),),\n 'dot':0,\n 'pos':(0,0)}\n ,chart[0])\n for i in range(len(words)+1):\n for state in chart[i]:\n if not is_complete(state) and not is_pos(state['rhs'][state['dot']]):\n predictor(state)\n elif not is_complete(state) and is_pos(state['rhs'][state['dot']]):\n scanner(state)\n else:\n completer(state)\n\n","sub_path":"NOTES/3:39.py","file_name":"3:39.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"243053846","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.contrib import messages\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.views import LoginView\nfrom .forms import 
(UserRegisterForm, \n UserProfileUpdateForm, UserUpdateForm)\nfrom assignment.models import StudentOtherCourse, Assignment, SelectCourse\nUser = get_user_model()\n\ndef register(request):\n if request.method == 'POST':\n u_form = UserRegisterForm(request.POST)\n if u_form.is_valid():\n u_form.save()\n index_number = u_form.cleaned_data.get('index_number')\n index_num = u_form.cleaned_data.get('index')\n messages.success(request, \"Registered successfully!!!\")\n return HttpResponseRedirect(reverse('login'))\n else:\n u_form = UserRegisterForm()\n context = {\n 'u_form': u_form, \n }\n return render(request, 'users/register.html', context)\n\n\n\n@login_required\ndef profile(request, pk, i_n):\n sel = SelectCourse.objects.get(pk=pk)\n o_t = StudentOtherCourse.objects.get(id=i_n)\n\n # o_t = StudentOtherCourse.objects.filter(choose_course=sel.courses).first()\n if request.method == 'POST':\n u_form = UserUpdateForm(request.POST, instance=o_t.user)\n p_form = UserProfileUpdateForm(request.POST, request.FILES, instance=o_t.user.userprofile)\n if u_form.is_valid and p_form.is_valid:\n u_form.save()\n p_form.save()\n messages.success(request, \"Profile changed successfully!!!\")\n return HttpResponseRedirect(reverse('profile', args=(sel.user.pk,i_n,)))\n else:\n u_form = UserUpdateForm(instance=o_t.user)\n p_form = UserProfileUpdateForm(instance=o_t.user.userprofile)\n context = {\n 'u_form': u_form,\n 'p_form': p_form,\n 'o_t': o_t, \n 'sel': sel, \n }\n return render(request, 'users/profile.html', context)\n\n\ndef back(request, pk):\n u = User.objects.get(pk=pk)\n return HttpResponseRedirect(reverse('submit-assignment', args=(u.pk,)))\n\n# class CustomLoginView(LoginView):\n# authentication_form = CustomAuthenticationForm\n\n\ndef login_success(request):\n if request.user.is_staff or request.user.is_superuser:\n messages.success(request, \"Login successful!!!\")\n return HttpResponseRedirect(reverse('assignment-section:all-courses'))\n else:\n messages.success(request, \"Login successful!!!\")\n return HttpResponseRedirect(reverse('assignment-section:submit-assignment'))\n\n\n","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"629825597","text":"#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#This file is where I declare the strategy objects, as strategy object is\n#a thing which can tell you what it would buy and sell on any given day and\n#importantly it can also test itself.\n\n#~~~Features of a strategy object:~~~\n#-->evaluateStrategyAtCurrentTime - when this function is run, the returned\n# value must be a list of 4 numbers indicating the requested CHANGE in\n# holdings of each of a set of assets at the particular date specified.\n# The set of assets depends on whether the dataSetType is set to training\n# or crypto, and your function should return different values accordingly.\n# Getting a good one of these functions is the big challenge.\n#-->backtest - when this function is run, the strategy is to be evaluated.\n# You probably wont need to change this. It basically just runs through\n# a set of dates and trades upon what evaluateStrategyAtCurrentTime tells\n# it to do. And then produces a graph at the end for you to see how it did.\n#\n#Lastly you should know what the two datasets are. 
I'm going to write it here\n#so that you dont need to look into apiWrapper.py (because its pretty grim).\n#The Crypto set is simply prices for LTC, ETH and BTC for the last 6 months (I'm\n#working on making that longer). The Training set is very important, this is\n#the one you will be running most of your tests on and you can optimise your\n#training strategy as much as you want on it. It consists of 6 prices, the first\n#one is the polish zloty to GBP exchange rate, and the second is the inverse of\n#that exchange rate (we need to make sure our algorithms aren't gonna lose us\n#money if the market turns downwards). The third is chinese Yuan to GBP and the\n#4th is that inverse. The 5th is Stellar Lumens crypto prices, and the 6th is\n#that inverse.\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nfrom CryptoTrader.handyFunctions import *\nfrom CryptoTrader.apiWrapper import *\n#The functions available from apiWrapper are:\n#1) getAvailableCurrencies() This was really just my first tester function, this\n# is what GDAX offers for trading through their api\n#2) getHistoricCryptoPrices(startdate, enddate, timeInterval) This gets you the\n#crypto data for between the specified dates - NB you cant get more than 6 months\n#atm. NB timeInterval of 84600 is 1 day\n#3) getHistoricTrainingPrices(startdate, enddate) This gets you the training data\n#There is no time interval because you can only get prices daily\nimport datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#So this is a 'base' class (object) for a trading strategy, all our other strategies\n#should inherit from this. It'll give you the backtest function so you don't need\n#to write your own\nclass TradingStrategy():\n def __init__(self):\n print('This is a placeholder for a function you need to create, and when you do that this one will be ignored, this line should never be run')\n\n #Tests your strategy\n def backtest(self, dataSetType):\n if dataSetType == 'Training':\n testStartDate = '2017-07-01'\n trainingPrices = getHistoricTrainingPrices(datetime.datetime.strptime(testStartDate, '%Y-%m-%d').date() - datetime.timedelta(days=1), datetime.date.today())\n elif dataSetType == 'Crypto':\n testStartDate = '2017-07-01'\n trainingPrices = getHistoricCryptoPrices(datetime.datetime.strptime(testStartDate, '%Y-%m-%d').date() - datetime.timedelta(days=1), datetime.date.today(), 86400)\n\n\n dateRange = dateTimeRange(testStartDate, datetime.date.today())\n profitAndLoss = np.zeros(len(dateRange)) #Defines an array showing you the profit on each day of trading with that strategy\n currentHoldings = np.zeros(trainingPrices.shape[0])\n for i in range(len(dateRange)):\n element = dateRange[i]\n #Queries the strategy for what it would do\n strategyOutput = self.evaluateStrategyAtCurrentTime(element, 'Training')\n #Adds its recommendation to the portfolio holdings\n currentHoldings = currentHoldings + strategyOutput\n #Goes through the holdings and adds up the profit made from holding each asset on this day\n for j in range(len(currentHoldings)):\n profitAndLoss[i] = profitAndLoss[i-1] + currentHoldings[j]*(trainingPrices[j, i] - trainingPrices[j, i-1])\n #plots the results\n plt.plot(dateRange, profitAndLoss)\n plt.show()\n\n def evaluateStrategyAtCurrentTime(self, time, dataSetType):\n print('This is a placeholder for a function you need to create, and when you do that this one will be ignored, this line should never be run')\n\n\n#This is my tester strategy, you should study this carefully. 
It has two functions both of which\n#overwrite function in the base class you can see above. The first is an initialiser __init__(self)\n#which is run when the object is created (like in line 10 of main.py), and the second is my genius\n#insightful trading strategy (lol jk its not very genius).\nclass LukesFirstStrategy(TradingStrategy):\n # NB. 'self' is a name for the strategy object.\n def __init__(self):\n #Gets the training prices into your object\n self.trainingPrices = getHistoricTrainingPrices('2017-01-01', datetime.date.today())\n # Gets the crypto prices into your object\n self.cryptoPrices = getHistoricCryptoPrices('2017-06-01', datetime.date.today(), 86400)\n #Makes an array of dates, just because it comes in handy\n self.dates = dateTimeRange('2017-06-01', datetime.date.today())\n #Defines a data structure (dictionary) which the strategy can store future trades it intends\n #to make in\n self.futureTrades = {}\n\n def evaluateStrategyAtCurrentTime(self, thisDaysDate, dataSetType):\n #This changes the numbers used in the evaluation of the strategy, depending on which dataset\n #it's being requested to use\n if dataSetType == 'Training':\n assetPrices = self.trainingPrices\n elif dataSetType == 'Crypto':\n assetPrices = self.cryptoPrices\n #finds todays date in the date array\n todaysIndex = self.dates.index(thisDaysDate)\n #restricts the prices you can use for your strategy, you cant use future prices in your strategy\n #because that would be cheating!\n useablePrices = assetPrices[:, 0:todaysIndex]\n\n #Checks if the strategy has any planned trades for today\n if thisDaysDate in self.futureTrades:\n futureCommitments = self.futureTrades[thisDaysDate]\n else:\n futureCommitments = np.zeros(useablePrices.shape[0])\n\n todaysTrade = np.zeros(useablePrices.shape[0])\n\n for i in range(len(useablePrices)): #iterates through the different cryptos we're looking at\n commodity = useablePrices[i, :]\n #And this is where my mega advanced algorithm is! 
It simply says that if the price of the\n #asset went up yesterday and the day before, then I think we should invest!!!\n if commodity[-2] < commodity[-1] and commodity[-3] < commodity[-2]:\n todaysTrade[i] = 10/commodity[-1] #Buy 10 dollars worth of that asset\n self.futureTrades[thisDaysDate + datetime.timedelta(days=3)] = -todaysTrade #Sell 10 dollars of that asset again in 3 days time\n return todaysTrade + futureCommitments #returns an array detailing the requested change in holdings\n\n\nclass MattsFirstStrategy(TradingStrategy):\n def __init__(self):\n self.trainingPrices = getHistoricTrainingPrices('2017-06-01', datetime.date.today())\n self.cryptoPrices = getHistoricCryptoPrices('2017-06-01', datetime.date.today(), 86400)\n self.dates = dateTimeRange('2017-06-01', datetime.date.today())\n self.futureTrades = {}\n\n def evaluateStrategyAtCurrentTime(self, thisDaysDate, dataSetType):\n if dataSetType == 'Training':\n assetPrices = self.trainingPrices\n elif dataSetType == 'Crypto':\n assetPrices = self.cryptoPrices\n todaysIndex = self.dates.index(thisDaysDate)\n useablePrices = assetPrices[:, 0:todaysIndex]\n\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n #Your genius strategy goes here!\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n return","sub_path":"tradingStrategy.py","file_name":"tradingStrategy.py","file_ext":"py","file_size_in_byte":8373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"188777587","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error, r2_score\n\n\n\n# Pre-processing of the data\ndf_raw = pd.read_csv('assets/hourly_load&weather_data.csv', header=None, skiprows=1) # loading raw data from the CSV\ndf_raw_array = df_raw.values # numpy array\ny = np.array(df_raw[1])\ntemp = []\nfor i in range(y.size):\n temp.append([df_raw[0][i],df_raw[2][i],df_raw[3][i],df_raw[4][i],df_raw[5][i]])\nx = np.array(temp)\n\n\nprint(x)\nprint(y)\nmodel = LinearRegression().fit(x,y)\ny_pred = model.predict(x)\nprint('Predicted values:', y_pred)\nprint('Intercept:', model.intercept_)\nprint('Slope:', model.coef_)\n\n\nprint('The MSE value is:',\n mean_squared_error(y_pred, y, squared=True))\nprint('The RMSE value is:',\n mean_squared_error(y_pred, y, squared=False))\nprint('The R-squared value is:', r2_score(y, y_pred)) # r2_score expects (y_true, y_pred)\nprint('The MAPE value is:', np.mean(np.abs((y - y_pred) / y)) *100,'\\n')\n\n# Plotting the results\nfig = plt.figure()\nplt.plot((y_pred) * 100)\nplt.plot((y) * 100)\nplt.title(\"Baseline Model\")\nplt.xlabel('Hour')\nplt.ylabel('Electricity load')\nplt.legend(('Predicted', 'Actual'), fontsize='15')\nplt.show()\nfig.savefig('results/baseline_weather/final_output.jpg', bbox_inches='tight')\n\n\n# Storing the result in a file: 'load_forecasting_result.txt'\npredicted_test_result = y_pred\nnp.savetxt('results/baseline_weather/predicted_values.txt', predicted_test_result)\nactual_test_result = y\nnp.savetxt('results/baseline_weather/test_values.txt', actual_test_result)\n\n\n","sub_path":"baseline_weather.py","file_name":"baseline_weather.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"129702934","text":"#!/usr/bin/python3\n# -*- coding: UTF-8 -*- \n\nimport json\nimport requests\nimport time\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.header import Header\n\n\n## City/district to fetch the weather for\ndq = 
'西安'\n\n## API request URL\nurl = 'https://www.apiopen.top/weatherApi?city=' + dq\n\n## Send the GET request and parse the JSON response\nr = requests.get(url).json()\n\nwea = r['data']['forecast']\n\ntimeH = time.strftime(\"%H\", time.localtime())\n\nintTime = int(timeH)\n\n\n## Decide whether an umbrella is needed\ndef umbrella(y):\n rain = '雨'\n result = rain in wea[y]['type']\n if result:\n return '记得带伞'\n else:\n return r['data']['ganmao']\n\n\n\ndef mail(title,numWe): \n # Third-party SMTP service\n mail_host=\"smtp.qq.com\" # SMTP server\n mail_user=\"user@qq.com\" # username\n mail_pass=\"授权码\" # SMTP authorization code\n \n \n sender = 'user@qq.com' \n receivers = ['mail@qq.com'] # recipients; set to your QQ mailbox or any other address\n \n message = MIMEText(title, 'plain', 'utf-8')\n message['From'] = Header(\"带伞小助手\", 'utf-8')\n message['To'] = Header(\"带伞\", 'utf-8') # person to remind\n \n subject = time.strftime(\"%Y\", time.localtime()) +'年'+ wea[numWe]['date']\n message['Subject'] = Header(subject, 'utf-8')\n \n \n try:\n smtpObj = smtplib.SMTP() \n smtpObj.connect(mail_host, 25) # 25 is the SMTP port\n smtpObj.login(mail_user,mail_pass)\n smtpObj.sendmail(sender, receivers, message.as_string())\n print (\"邮件发送成功\")\n except smtplib.SMTPException:\n print (\"Error: 无法发送邮件\")\n\n\n\n\n\n# Decide which time window we are in\n\nif intTime >= 7 and intTime < 19:\n print('今天' + wea[0]['high'])\n print(umbrella(0))\n mail(umbrella(0),0)\n \nelse:\n print('明天'+ wea[1]['high'])\n mail(umbrella(1),1)\n\n\n","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"512051043","text":"import requests\nfrom pprint import pprint\nimport pickle \nfrom openpyxl import load_workbook, workbook\nimport pandas as pd\nimport sys\nimport calendar\nimport datetime\nfrom dateutil import relativedelta \nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv()\nup_api_key = os.getenv('up_api_key')\ncurrent_account = os.getenv('current')\n\ndef get_query_time_interval():\n ### This function returns the time period of interest based on the current date\n current_date = datetime.datetime.now()\n one_month = relativedelta.relativedelta(months = 1)\n query_date = current_date - one_month\n query_month = query_date.month\n query_year = query_date.year\n query_days = calendar.monthrange(query_year, query_month)[1]\n\n since = '{}-{}-01T00:00:00Z'.format(query_year, str(query_month).zfill(2))\n until = '{}-{}-{}T00:00:00Z'.format(query_year, str(query_month).zfill(2), query_days)\n\n return since, until, query_month, query_year, query_days\n\ndef get_up_data(since, until):\n try:\n url = \"https://api.up.com.au/api/v1\"\n payload = {'filter[since]': since, 'filter[until]': until, 'page[size]':'100'}\n header = {'Authorization': up_api_key}\n print(header)\n r = requests.get(url+\"/transactions\", headers = header, params = payload)\n if r.ok:\n transaction_list = [r.json()]\n x = 0\n if r.json()['links']['next'] is None:\n x = 1\n while x == 0:\n r = requests.get(r.json()['links']['next'], headers = header, params = payload)\n if r.json()['links']['next'] is None:\n x = 1\n transaction_list.append(r.json())\n\n return transaction_list\n else: \n print(\"Bad response from Up Api = \", r.json())\n sys.exit()\n\n except Exception as e:\n print(\"error: {error_message}\". 
format(error_message=str(e)))\n \ndef sum_transactions(output):\n ### This function is used to sum up all the transaction values from the response dictionary\n category_dict = {}\n for response in output:\n for transaction in response['data']:\n id = transaction['relationships']['account']['data']['id']\n if id == current_account:\n category = transaction['relationships']['category']['data']\n if category is not None:\n category = category['id']\n if category not in category_dict:\n category_dict[category] = round(float(transaction['attributes']['amount']['value']), 2)\n else:\n category_dict[category] += round(float(transaction['attributes']['amount']['value']), 2)\n\n for key, value in category_dict.items():\n category_dict[key] = round(value,2)\n\n return category_dict\n\ndef check_new_types(category_dict):\n ### Used to check if there are new categories \n types_considered = [ 'groceries', 'health-and-medical', 'life-admin', 'restaurants-and-cafes', 'rent-and-mortgage', 'mobile-phone', 'takeaway', 'holidays-and-travel', 'hair-and-beauty', 'education-and-student-loans', 'news-magazines-and-books', 'public-transport', 'homeware-and-appliances', 'games-and-software', 'taxis-and-share-cars', 'clothing-and-accessories', 'gifts-and-charity', 'home-maintenance-and-improvements']\n diff_set = str(set(list(category_dict.keys())) - set(types_considered))\n error_message = 'There are new names in the incoming file ' + diff_set\n if diff_set != 'set()':\n sys.exit(error_message)\n\ndef category_selector(x):\n ### To arrange categories. Used with the function - order_df\n good_life_except = ('games-and-software', 'holidays-and-travel', 'restaurants-and-cafes', )\n take_away = ('takeaway')\n home_except = ('homeware-and-appliances', 'home-maintenance-and-improvements')\n personal = ('health-and-medical', 'life-admin', 'mobile-phone', 'hair-and-beauty', 'education-and-student-loans', 'news-magazines-and-books', 'clothing-and-accessories', 'gifts-and-charity')\n transport = ('public-transport', 'taxis-and-share-cars')\n\n if x in good_life_except:\n return 'Good life (except Takeaway)'\n elif x in take_away:\n return 'Takeaway'\n elif x in home_except:\n return 'Home (Other)'\n elif x == 'groceries':\n return 'Groceries'\n elif x == 'rent-and-mortgage':\n return 'Rent'\n elif x in personal:\n return 'Personal'\n else:\n return 'Transport'\n\ndef order_df(category_dict, query_month, query_year, query_days):\n ### This function converts the dictionary to dataframe (to be used to write excel)\n df = pd.DataFrame.from_dict([category_dict])\n df = df.transpose().reset_index()\n df.columns = ['type', 'amount']\n df['category'] = df['type'].apply(lambda x: category_selector(x))\n df = df.groupby(by='category').sum()\n df['amount'] = round(df['amount'] *-1,0)\n #Add zero value if there are no values in the category\n for cat in ['Good life (except Takeaway)', 'Groceries', 'Personal', 'Rent', 'Takeaway', 'Transport', 'Home (Other)']:\n if cat not in df.index:\n df.loc[cat] = 0\n \n df = df.transpose()\n df['total'] = df.sum(axis=1)\n df['month'] = query_month\n df['year'] = query_year\n df['days'] = query_days\n df['income'] = None\n df['hourly_rate'] = None \n df['hours_per_week'] = df['income']/df['hourly_rate']\n df['good_life_%'] = round(df['Good life (except Takeaway)']*100/df['total'],0)\n df['personal_%'] = round(df['Personal']*100/df['total'],0)\n df['rent_%'] = round(df['Rent']*100/df['total'],0)\n df['food_%'] = round((df['Groceries'] + df['Takeaway'])*100/df['total'],0)\n df['home_%'] = 
round(df['Home (Other)']*100/df['total'],0)\n df['Transport_%'] = round(df['Transport']*100/df['total'],0)\n column_order = ['year','month', 'days', 'income', 'hourly_rate', 'hours_per_week', 'total', 'Good life (except Takeaway)', 'Takeaway', 'Personal', 'Rent', 'Groceries', 'Home (Other)', \n 'Transport', 'good_life_%', 'personal_%', 'rent_%', 'food_%', 'home_%', 'Transport_%']\n df = df.reindex(columns = column_order)\n\n return df\n\ndef convert_df_excel(df, file_name = 'demo.xlsx', sheet_name = 'Money'):\n ### This function converts df to excel file\n book = load_workbook(file_name) #Open_workbook in pyxl\n try:\n writer = pd.ExcelWriter(file_name, engine='openpyxl') #Open in Excel writer\n writer.book = book #connect both pyxl and pandas (excel_writer)\n writer.sheets = dict((ws.title, ws) for ws in book.worksheets)\n \n startrow = book[sheet_name].max_row\n start_column = 1\n\n #Check if the data already exists\n last_month = book[sheet_name].cell(row = startrow, column=3).value\n last_year = book[sheet_name].cell(row = startrow, column=2).value\n if last_month == query_month and last_year == query_year:\n book.save(file_name)\n error_string = \"The data already exists for month {} and year {}\".format(query_month, query_year)\n print(error_string)\n df.to_excel(writer, sheet_name, startrow=startrow-1, startcol = start_column, header = False, index = False)\n sys.exit('Data has been replaced') \n\n df.to_excel(writer, sheet_name, startrow=startrow, startcol = start_column, header = False, index = False)\n \n print(\"Data has been inserted\")\n book.save(file_name)\n\n except Exception as e:\n print(\"error: {error_message}\". format(error_message=str(e)))\n book.save(file_name)\n\nif __name__ == '__main__':\n since, until, query_month, query_year, query_days = get_query_time_interval()\n output = get_up_data(since, until)\n category_dict = sum_transactions(output)\n check_new_types(category_dict)\n df = order_df(category_dict, query_month, query_year, query_days)\n convert_df_excel(df)","sub_path":"up_api.py","file_name":"up_api.py","file_ext":"py","file_size_in_byte":7924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"202387860","text":"from talon import Context, Module\n\nctx = Context()\nmod = Module()\nmod.tag(\"npm\", desc=\"Tag for enabling npm nodejs package manager in terminal\")\n\nctx.matches = r\"\"\"\ntag: terminal\nand tag: user.npm\n\"\"\"\n","sub_path":"apps/npm/npm.py","file_name":"npm.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"212080309","text":"from flask import Flask, jsonify, request\r\n\r\napp = Flask(__name__)\r\n\r\nheaders = {'Content-Type': 'application/json'}\r\n\r\n@app.route('/example-get-static', methods=['GET'])\r\ndef getEndpointStatic():\r\n staticResponse = {\r\n \"num-example\" : 25,\r\n \"string-example\" : \"example string\",\r\n \"list example\" : [1,3,5],\r\n \"dict-example\" : {\r\n \"thing1\" : 1,\r\n \"thing2\" : 2\r\n }\r\n }\r\n # returns the json version of the dictionary\r\n return jsonify(staticResponse), 200\r\n\r\n\r\n@app.route('/example-get-dynamic', methods=['GET'])\r\ndef getEndpointDynamic():\r\n # will need the ?name=string param here (although string is not enforced)\r\n name = request.args.get(\"name\")\r\n\r\n if not name:\r\n return jsonify({'message': 'Name not provided in request as param'}), 400\r\n\r\n dynamicResponse = {\r\n \"user name\" : name\r\n }\r\n\r\n return jsonify(dynamicResponse), 200\r\n\r\n\r\n@app.route('/example-post', 
methods=['POST'])\r\ndef postEndpoint():\r\n \"\"\" The format to make a request is \r\n {\r\n \"age\" : some num,\r\n \"name\" : string\r\n }\r\n \"\"\"\r\n\r\n data = request.get_json()\r\n if not data:\r\n return jsonify({'message': 'JSON not provided in request as param'}), 400\r\n \r\n try:\r\n name = data[\"name\"]\r\n age = data[\"age\"]\r\n\r\n except (KeyError, TypeError):\r\n return jsonify({'message': 'JSON is incorrect'}), 400\r\n\r\n response_string = name + \" is \" + str(age) + \" years old.\"\r\n\r\n response = {\r\n \"string\" : response_string\r\n }\r\n\r\n return jsonify(response), 200\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n # run app in debug mode on port 5000\r\n\tapp.run(debug=True, port=5000) ","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"79369827","text":"import unittest\nfrom pytraj.common_actions import calc_molsurf\nfrom pytraj.base import *\nfrom pytraj.actions.CpptrajActions import Action_Surf\nfrom pytraj.actions import Action\nfrom pytraj.TrajectoryIterator import TrajectoryIterator\nfrom pytraj.datasets import cast_dataset\nfrom pytraj import adict\n\nprint(dir(Action_Surf()))\n\nfarray = Trajectory(top=Topology(\"./data/Tc5b.top\"), filename='data/md1_prod.Tc5b.x')\n\nclass TestSurf(unittest.TestCase):\n def test_0(self):\n print(\"newtop\")\n\n farray0 = farray.copy()\n newtop = farray0.top.copy()\n oldtop = farray0.top\n\n d0 = calc_molsurf(farray[0], \"@CA\", farray.top)\n print (d0[:])\n adict['molsurf'].help()\n \n toplist = TopologyList()\n toplist.add_parm(newtop)\n dslist = DataSetList()\n dflist = DataFileList()\n \n act = Action_Surf()\n act.read_input(\"@CA\", oldtop, dslist=dslist)\n act.process(oldtop, newtop)\n frame0 = Frame(farray.top.n_atoms)\n act.do_action(farray[0], frame0)\n print (dslist[0][:])\n act.help()\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_Action_Surf.py","file_name":"test_Action_Surf.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"191545363","text":"import requests\nfrom n26 import config\n\nurl = 'https://api.tech26.de'\n\n\n# Api class can be imported as a library in order to use it within applications\nclass Api(object):\n # constructor accepting None to maintain backward compatibility\n def __init__(self, cfg=None):\n if not cfg:\n cfg = config.get_config()\n self.config = cfg\n\n def get_token(self):\n values_token = {'grant_type': 'password', 'username': self.config.username, 'password': self.config.password}\n headers_token = {'Authorization': 'Basic YW5kcm9pZDpzZWNyZXQ='}\n\n response_token = requests.post(url + '/oauth/token', data=values_token, headers=headers_token)\n token_info = response_token.json()\n # TODO check if access_token is not nil\n return token_info['access_token']\n\n # TODO: this method will check if token is valid, if not it will run get_token\n def validate_token(self):\n pass\n\n # IDEA: @get_token decorator\n def get_account_info(self):\n access_token = self.get_token()\n headers_balance = {'Authorization': 'bearer ' + str(access_token)}\n req_account_info = requests.get(url + '/api/me', headers=headers_balance)\n info = req_account_info.json()\n return info\n\n def get_account_statuses(self):\n access_token = self.get_token()\n headers_balance = {'Authorization': 'bearer ' + str(access_token)}\n req_account_statuses = requests.get(url + '/api/me/statuses', headers=headers_balance)\n status = 
req_account_statuses.json()\n return status\n\n def get_addresses(self):\n access_token = self.get_token()\n headers_balance = {'Authorization': 'bearer ' + str(access_token)}\n req_addresses = requests.get(url + '/api/addresses', headers=headers_balance)\n addresses = req_addresses.json()\n return addresses\n\n def get_balance(self):\n access_token = self.get_token()\n headers_balance = {'Authorization': 'bearer ' + str(access_token)}\n req_balance = requests.get(url + '/api/accounts', headers=headers_balance)\n balance = req_balance.json()\n return balance\n\n def get_spaces(self):\n access_token = self.get_token()\n headers_spaces = {'Authorization': 'bearer ' + str(access_token)}\n req_spaces = requests.get(url + '/api/spaces', headers=headers_spaces)\n spaces = req_spaces.json()\n return spaces\n\n def barzahlen_check(self):\n access_token = self.get_token()\n headers_balance = {'Authorization': 'bearer ' + str(access_token)}\n req_barzahlen_check = requests.get(url + '/api/barzahlen/check', headers=headers_balance)\n barzahlen_check = req_barzahlen_check.json()\n return barzahlen_check\n\n def get_cards(self):\n access_token = self.get_token()\n headers_balance = {'Authorization': 'bearer ' + str(access_token)}\n req_cards = requests.get(url + '/api/v2/cards', headers=headers_balance)\n cards = req_cards.json()\n return cards\n\n def get_account_limits(self):\n access_token = self.get_token()\n headers_balance = {'Authorization': 'bearer ' + str(access_token)}\n req_limits = requests.get(url + '/api/settings/account/limits', headers=headers_balance)\n limits = req_limits.json()\n return limits\n\n def get_contacts(self):\n access_token = self.get_token()\n headers_balance = {'Authorization': 'bearer ' + str(access_token)}\n req_contacts = requests.get(url + '/api/smrt/contacts', headers=headers_balance)\n contacts = req_contacts.json()\n return contacts\n\n def get_transactions(self):\n access_token = self.get_token()\n headers_balance = {'Authorization': 'bearer ' + str(access_token)}\n req_transactions = requests.get(url + '/api/smrt/transactions', headers=headers_balance)\n transactions = req_transactions.json()\n return transactions\n\n def get_transactions_limited(self, limit=5):\n access_token = self.get_token()\n headers_balance = {'Authorization': 'bearer ' + str(access_token)}\n req_transactions = requests.get(url + '/api/smrt/transactions?limit=' +\n str(limit), headers=headers_balance)\n transactions = req_transactions.json()\n return transactions\n\n def get_statements(self):\n access_token = self.get_token()\n headers_balance = {'Authorization': 'bearer ' + str(access_token)}\n req_statements = requests.get(url + '/api/statements', headers=headers_balance)\n statements = req_statements.json()\n return statements\n\n def block_card(self, card_id):\n access_token = self.get_token()\n headers_balance = {'Authorization': 'bearer ' + str(access_token)}\n req_block_card = requests.post(url + '/api/cards/' + card_id + '/block', headers=headers_balance)\n blocked_card = req_block_card.json()\n return blocked_card\n\n def unblock_card(self, card_id):\n access_token = self.get_token()\n headers_balance = {'Authorization': 'bearer ' + str(access_token)}\n req_unblock_card = requests.post(url + '/api/cards/' + card_id + '/unblock', headers=headers_balance)\n unblocked_card = req_unblock_card.json()\n return 
unblocked_card\n","sub_path":"n26/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":5312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"162243788","text":"\nimport cv2 # Importing the opencv\nimport numpy as np # Import Numerical Python\nimport NameFind # Import NameFind function\nWHITE = [255, 255, 255]\n\n\nface_cascade = cv2.CascadeClassifier('Haar/haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('Haar/haarcascade_eye.xml') # Classifier \"eye\" Haar Cascade\n\nvideo = input(\"Video: \")\n\nID = NameFind.AddName()\ncounterFrames = 0\ncap = cv2.VideoCapture(\"videos/\"+video+\".mp4\") \n\nwhile(counterFrames < 50): # stop once 50 face crops have been saved\n print(counterFrames)\n ret, img = cap.read()\n\n # if the frame cannot be read (e.g. the video ended), stop\n if not ret:\n break\n \n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n olhos = eye_cascade.detectMultiScale(gray, 1.3, 5)\n\n # if no face was found, skip this frame\n if not np.any(faces):\n continue\n\n # likewise skip frames without detected eyes (olhosImg would stay undefined below)\n if not np.any(olhos):\n continue\n\n # found a face? crop it\n for (x, y, w, h) in faces:\n rostoImg = img[y:y+h, x:x+w]\n\n for (x, y, w, h) in olhos:\n olhosImg = img[y:y+h, x:x+w]\n\n # very small crops are discarded\n larg, alt, _ = rostoImg.shape\n if(larg * alt <= 20 * 20):\n continue\n \n largo, alto, _ = olhosImg.shape\n if(largo * alto <= 20 * 20):\n continue\n\n # save the crop to the data folder\n rostoImg = cv2.resize(rostoImg, (255, 255))\n olhosImg = cv2.resize(olhosImg, (255, 255))\n cv2.imwrite(\"data/User.\" + str(ID) + \".\" + str(counterFrames) + \".jpg\", rostoImg)\n #cv2.imwrite(\"data/User.\" + str(ID) + \".\" + str(counterFrames) + \".jpg\", olhosImg)\n counterFrames += 1\ncap.release() # release the video capture\n\n","sub_path":"capturar.py","file_name":"capturar.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"308092283","text":"from typing import Union\n\nfrom deeplodocus.core.metrics import Losses, Metrics\nfrom deeplodocus.data.load.dataset import Dataset\nfrom deeplodocus.flags import *\nfrom deeplodocus.utils.generic_utils import ProgressBar\nfrom deeplodocus.utils.notification import Notification\nfrom deeplodocus.core.inference import Inferer\n\n\nclass Tester(Inferer):\n\n def __init__(\n self,\n dataset: Dataset,\n model,\n transform_manager,\n losses: Losses,\n metrics: Union[Metrics, None] = None,\n batch_size: int = 32,\n num_workers: int = 1,\n shuffle: Flag = DEEP_SHUFFLE_NONE,\n name: str = \"Tester\"\n ):\n super(Tester, self).__init__(\n dataset, model, transform_manager, losses,\n metrics=metrics,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=shuffle,\n name=name\n )\n self.progress_bar = None\n\n def evaluate(self, silent: bool = False, progress_bar: Union[ProgressBar, bool] = True, prefix: str = \"Evaluation :\"):\n self.evaluation_start(silent=silent, progress_bar=progress_bar, prefix=\"DEEP PROGRESS : %s\" % prefix)\n for batch in self.dataloader:\n self.evaluation_batch(batch)\n return self.evaluation_end(silent=silent)\n\n def evaluation_start(self, silent: bool = False, progress_bar: bool = False, prefix: str = \"Evaluation :\"):\n if progress_bar is True:\n self.progress_bar = ProgressBar(self.get_num_batches(), prefix=prefix)\n elif isinstance(progress_bar, ProgressBar):\n self.progress_bar = progress_bar\n if not silent:\n Notification(DEEP_NOTIF_INFO, DEEP_MSG_EVALUATION_STARTED)\n 
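# eval() switches dropout and batch-norm layers to inference behaviour,\n # so the losses/metrics computed below are deterministic for a fixed dataset.\n 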
self.model.eval() # Put model into evaluation mode\n self.losses.reset(self.dataset.type) # Reset corresponding losses\n self.metrics.reset(self.dataset.type) # Reset corresponding metrics\n\n def evaluation_end(self, silent: bool = False):\n self.transform_manager.finish() # Call finish on all output transforms\n loss, losses = self.losses.reduce(self.dataset.type) # Get total loss and mean of each loss\n metrics = self.metrics.reduce(self.dataset.type) # Get total metric values\n if not silent:\n Notification(DEEP_NOTIF_SUCCESS, DEEP_MSG_EVALUATION_FINISHED)\n Notification(DEEP_NOTIF_RESULT, self.compose_text(loss, losses, metrics))\n return loss, losses, metrics\n\n def evaluation_batch(self, batch):\n inputs, labels, additional_data = self.clean_single_element_list(batch)\n\n # Send data to device\n inputs = self.to_device(inputs, self.model.device)\n labels = self.to_device(labels, self.model.device)\n additional_data = self.to_device(additional_data, self.model.device)\n\n # Forward pass\n with torch.no_grad():\n outputs = self.model(*inputs)\n\n # Detach the tensor from the graph\n outputs = self.detach(outputs)\n\n # Compute losses\n self.losses.forward(\n flag=self.dataset.type,\n outputs=outputs,\n labels=labels,\n inputs=inputs,\n additional_data=additional_data,\n model=self.model\n )\n\n # Output transforms\n outputs = self.transform_manager.transform(\n outputs=outputs,\n inputs=inputs,\n labels=labels,\n additional_data=additional_data,\n model=self.model\n )\n\n # Compute metrics\n self.metrics.forward(\n self.dataset.type,\n outputs=outputs,\n inputs=inputs,\n labels=labels,\n additional_data=additional_data,\n model=self.model\n )\n\n # Print\n if self.progress_bar is not None:\n self.progress_bar.step()\n","sub_path":"deeplodocus/core/inference/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"155247271","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python3.6/dist-packages/pyxrd/mixture/models/insitu_behaviours/insitu_behaviour.py\n# Compiled at: 2020-03-07 03:51:50\n# Size of source mod 2**32: 2477 bytes\nfrom mvc.models.properties import StringProperty\nfrom pyxrd.generic.io.custom_io import storables, Storable\nfrom pyxrd.generic.models.base import DataModel\nfrom pyxrd.refinement.refinables.mixins import RefinementGroup\n\n@storables.register()\nclass InSituBehaviour(DataModel, RefinementGroup, Storable):\n __doc__ = '\\n Interface class for coding in-situ behaviour scripts.\\n Sub-classes should override or implement the methods below.\\n '\n\n class Meta(DataModel.Meta):\n store_id = 'InSituBehaviour'\n concrete = False\n\n mixture = property(DataModel.parent.fget, DataModel.parent.fset)\n\n @property\n def refine_title(self):\n return 'In-situ behaviour'\n\n @property\n def refine_descriptor_data(self):\n return dict(phase_name=(self.phase.refine_title),\n component_name='*')\n\n name = StringProperty(default='New Behaviour',\n text='Name',\n visible=True,\n persistent=True,\n tabular=True)\n\n def __init__(self, *args, **kwargs):\n my_kwargs = (self.pop_kwargs)(kwargs, *[prop.label for prop in InSituBehaviour.Meta.get_local_persistent_properties()])\n (super(InSituBehaviour, self).__init__)(*args, **kwargs)\n kwargs = my_kwargs\n with self.data_changed.hold():\n self.name = self.get_kwarg(kwargs, 
self.name, 'name')\n\n def apply(self, phase):\n if phase is None:\n raise AssertionError('Cannot apply on None')\n elif not self.is_compatible_with(phase):\n raise AssertionError('`%r` is not compatible with phase `%r`' % (self, phase))\n\n def is_compatible_with(self, phase):\n return False","sub_path":"pycfiles/PyXRD-0.8.4.linux-x86_64.tar/insitu_behaviour.cpython-36.py","file_name":"insitu_behaviour.cpython-36.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"302713683","text":"import Core\nimport datetime\nfrom Core import Entities\nfrom pony import orm\n\n\nclass Service:\n\n @classmethod\n def device_connection_info(cls, id:int):\n returnValue = None\n if id is not None and id > 0:\n with orm.db_session:\n returnValue = orm.select(d for d in Entities.Device if d.id == id).first()\n\n if returnValue is None:\n returnValue = {}\n\n return returnValue\n\n @classmethod\n def save(cls, data):\n if not isinstance(data, Core.Result): raise Exception(\"Need Result\")\n with orm.db_session:\n patient = Core.Patient()\n patient.is_deleted = False\n patient.gender = Core.Gender.Woman\n patient.name = \"Özlem\"\n patient.surname = \"Barış\"\n patient.birth_date = datetime.date.today()\n Core.db.commit()\n #print(Core.Patient[1])\n #res = Core.Patient.select_by_sql(\"select * from Patient\")\n\n @classmethod\n def save_all(cls, datas):\n if not isinstance(datas, list): raise Exception(\"Need Array\")\n\n\n\n for result in datas:\n if not isinstance(result, Core.Result): raise Exception(\"Need Result Type\")\n cls.save(result)\n","sub_path":"Device/Core/Service.py","file_name":"Service.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"407813088","text":"import random\nfrom database.cat_facts import catfacts\nfrom database.reactions import reaction\nfrom database.images import cruise, lasagna\n#Make this one database\n\nbulli = ['nerd', 'shut up', 'kys', 'nurd', 'loser', 'git gud', 'noob', 'newb', 'n00b', 'stupid', 'idiot', 'dumb', 'dum',\n 'shut it', 'up shut', 'idot', 'ideot', 'baka', 'retard']\n\nleague = ['pat', 'lea', 'gue', 'league', 'legends']\n\nNUMBER = 0\ndef create_response(sentence):\n global NUMBER\n msg = {}\n \n #Limited Responses\n if \"no\" in sentence:\n NUMBER+=1\n if NUMBER%3 == 0:\n msg = \"no u\"\n if \"911\" in sentence:\n NUMBER+=1\n if NUMBER%2 == 0:\n msg = '911'\n if \"awoo\" in sentence:\n NUMBER+=1\n if NUMBER%2 == 0:\n msg = \"Awooo\"\n if 'fite' in sentence:\n NUMBER+=1\n if NUMBER%2 == 0:\n msg = \"fite me\"\n if 'tulta' in sentence:\n NUMBER+=1\n if NUMBER%2 == 0:\n msg = 'tulta'\n \n #Responses\n if 'satan' in sentence:\n msg = 'Thats me.'\n if any(word in sentence for word in bulli):\n msg = \"pls no bulli\"\n if 'league' in sentence:\n msg = 'No, David.'\n if len(sentence) > 220:\n msg = \"lol do you really expect me to read that?\"\n \n #Disable Response\n if '..' 
in sentence:\n msg = {} \n\n #Commands\n if sentence == '!count':\n NUMBER +=1\n msg = NUMBER\n if sentence == '!roll20':\n num = ((random.randint(0,19))+1)\n msg = num\n if sentence == '!roll6':\n num = ((random.randint(0,5))+1)\n msg = num\n if sentence == '!lasagna':\n num = random.randint(0,(len(lasagna)-1))\n msg = lasagna[num]\n if sentence == '!cruise':\n num = random.randint(0,(len(cruise)-1))\n msg = cruise[num]\n if sentence == '!catfacts':\n num = random.randint(0, (len(catfacts)-1))\n msg = catfacts[num]\n if sentence == '!thanks':\n msg = reaction[0]\n if sentence == '!dog':\n msg = reaction[1]\n if sentence == '!confuse':\n msg = reaction[2]\n if sentence == '!stop':\n msg = reaction[3]\n if sentence == '!obama':\n msg = reaction[4]\n if sentence == '!re':\n msg = reaction[5]\n if sentence == '!whatever':\n msg = reaction[6]\n if sentence == '!weeb':\n msg = reaction[7]\n if sentence == '!wth':\n msg = reaction[8]\n if sentence == '!shark':\n msg = reaction[9]\n\n return msg\n","sub_path":"database/ai.py","file_name":"ai.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"632296927","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\ny = np.linspace(0, 2.0*np.pi, 101)\r\nx1 = np.sin(y)\r\nx2 = np.sinh(y)\r\nynumbers = np.linspace(0, 7, 15)\r\nxnumbers1 = np.linspace(-1, 1, 11)\r\nxnumbers2 = np.linspace(0, 300, 7)\r\nfig, ax1 = plt.subplots()\r\nax2 = ax1.twiny() # ax2 and ax1 will have common y axis and different x axis\r\ncurve1, = ax1.plot(x1, y, label=\"sin\", color='r')\r\ncurve2, = ax2.plot(x2, y, label=\"sinh\", color='b')\r\ncurves = [curve1, curve2]\r\nax2.legend(curves, [curve.get_label() for curve in curves]) # also valid\r\nax1.set_xlabel(\"Magnitude\", color=curve1.get_color())\r\nax2.set_xlabel(\"Magnitude\", color=curve2.get_color())\r\nax1.set_ylabel(\"Angle/Value\", color=curve1.get_color())\r\nax1.tick_params(axis='y', colors=curve1.get_color())\r\nax1.tick_params(axis='x', colors=curve1.get_color())\r\nax2.tick_params(axis='x', colors=curve2.get_color())\r\nax1.set_xticks(xnumbers1)\r\nax2.set_xticks(xnumbers2)\r\nax1.set_yticks(ynumbers)\r\nax1.grid(color=curve2.get_color())\r\nax2.grid(color=curve2.get_color())\r\nax1.xaxis.grid(False)\r\nplt.title(\"Plot of sine and hyperbolic sine\")\r\nplt.show()\r\n","sub_path":"sine.py","file_name":"sine.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"633611968","text":"#!/usr/bin/env python\n\nimport numpy, os\nfrom icecube.photospline import I3SplineTable\nfrom icecube.photospline.splinetable import SplineTable\nfrom icecube.photospline.splinefitstable import write\n\nfname = \"constant.fits\"\n\n# make a constant spline surface\nspline = SplineTable()\nspline.ndim = 2\nspline.order = [2 for i in range(spline.ndim)]\nspline.knots = [numpy.linspace(0, 1, 10) for i in range(spline.ndim)]\nnsplines = tuple(knots.size-order-1 for knots,order in zip(spline.knots, spline.order))\nspline.coefficients = numpy.ones(nsplines)\n\nif os.path.exists(fname):\n\tos.unlink(fname)\nwrite(spline, fname)\n\nx = [0.5]*spline.ndim\n\nspline = I3SplineTable(fname)\n\nassert spline.eval(x) == spline.eval(x, [0,0]) == 1.\n\n# all second derivatives must be zero\nfor i in range(len(x)):\n\tderivs = [0]*2\n\tderivs[i] = 2\n\tassert spline.eval(x, derivs) == 
0.\n\nos.unlink(fname)\n","sub_path":"photospline/resources/test/deriv2.py","file_name":"deriv2.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"182733246","text":"# Status Check of EC2 Instances\n\nimport boto3\nimport sys\nfrom array import array\n\nmyec2 = boto3.client('ec2', aws_access_key_id = 'MyAccessKey',\naws_secret_access_key = 'MySecretAccessKey', region_name = 'MyRegion' )\n\nmydevInstances = ['i-xxxxxxxxxxxxxxxxx']\n\nfor vm in mydevInstances:\n print(vm+' '+'is selected')\n rango = myec2.describe_instance_status(\n Filters=[ {\n 'Name':'instance-state-name', \n 'Values':['running','stopped'] \n },\n ],\n InstanceIds=[ vm ])\n print(rango)\nif not mydevInstances:\n # the original for/else always printed this, since a loop else runs whenever the loop finishes without break\n print(\"No EC2 Instances found in the region\")\n","sub_path":"aws_ec2/ec2_StatusCheck.py","file_name":"ec2_StatusCheck.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"630151580","text":"''' Prepare KITTI data for 3D object detection.\n\nAuthor: Charles R. Qi\nDate: September 2017\n'''\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tqdm # for progress bar\nimport cPickle as pickle\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = os.path.dirname(BASE_DIR)\nsys.path.append(BASE_DIR)\nimport kitti_util as utils\nfrom kitti_object import *\n\n\ndef gen_data(idx_filename, save_to, split, type_whitelist, min_box_height = 20):\n ''' Extract point clouds and corresponding annotations in frustums\n generated from 2D bounding boxes\n Lidar points and 3d boxes are in *rect camera* coord system\n (as that in 3d box label files)\n \n Input:\n idx_filename: string, each line of the file is a sample ID\n split: string, either training or testing\n type_whitelist: a list of strings, object types we are interested in.\n Output:\n None\n '''\n\n VERBOSE = 0\n REMOVE_REAR_OBJ = 1 # remove objects behind velo\n SAVE_PKL = 1\n dataset = kitti_object(os.path.join(ROOT_DIR, 'dataset/KITTI/object'), split)\n data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]\n\n my_dict = dict()\n if VERBOSE:\n data_idx_range = data_idx_list\n else:\n data_idx_range = tqdm.tqdm(data_idx_list)\n\n for data_idx in data_idx_range:\n objects = dataset.get_label_objects(data_idx)\n calib = dataset.get_calibration(data_idx)\n\n ann_list = []\n for obj_idx in range(len(objects)):\n obj3d = objects[obj_idx]\n if obj3d.type not in type_whitelist:\n continue\n\n # 2D BOX: Get pts rect backprojected \n obj_ctr_cam = obj3d.t # location (x,y,z) in camera coord\n obj_ctr_cam = np.array(obj_ctr_cam)[np.newaxis, ...] 
# convert to numpy nx3\n obj_ctr_vel = calib.project_ref_to_velo(obj_ctr_cam)[0] # velo coord\n\n # Remove objects behind velo\n if REMOVE_REAR_OBJ and obj_ctr_vel[0] < 0:\n continue\n\n if VERBOSE:\n print(\"idx={:06d}, center={:.1f} {:.1f}\".format(data_idx, obj_ctr_vel[0], obj_ctr_vel[1]))\n\n # collect results (x==forward, y==left, type)\n ann_list.append([obj_ctr_vel[0], obj_ctr_vel[1], obj3d.type])\n\n if ann_list:\n # add to dict\n my_dict['%06d' % (data_idx)] = ann_list\n\n if SAVE_PKL:\n with open(save_to, 'wb') as fp:\n pickle.dump(my_dict, fp)\n\nif __name__ == '__main__':\n type_whitelist = {'Car': 1, 'Van': 1, 'Truck': 1, 'Pedestrian': 2, 'Person_sitting': 2, 'Cyclist': 3}\n kitti_dir = os.path.join(ROOT_DIR, 'dataset/KITTI/object')\n gen_data( \\\n os.path.join(BASE_DIR, 'image_sets/train.txt'),\n os.path.join(kitti_dir, 'train_carpedcyc_gt_bev.pkl'),\n 'training',\n type_whitelist=type_whitelist)\n\n gen_data( \\\n os.path.join(BASE_DIR, 'image_sets/val.txt'),\n os.path.join(kitti_dir, 'val_carpedcyc_gt_bev.pkl'),\n 'training',\n type_whitelist=type_whitelist)\n","sub_path":"kitti/gen_kitti_bev_pos.py","file_name":"gen_kitti_bev_pos.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"422735250","text":"from logger import g_logger\r\nfrom pymem import Pymem\r\nimport pymem\r\nfrom utility import is_window_exist\r\n\r\nprint(f\"\"\"{g_logger.CGREEN2}\r\n _____ _ _ _ _ _______ _ \r\n / ____| | | | | | \\ | | |__ __| (_) \r\n| (___ ___ __ _ _ __| | ___| |_| \\| | _____ ___ _ ___| |_ __ __ _ _ _ __ ___ _ __ \r\n \\___ \\ / __/ _` | '__| |/ _ \\ __| . ` |/ _ \\ \\/ / | | / __| | '__/ _` | | '_ \\ / _ \\ '__|\r\n ____) | (_| (_| | | | | __/ |_| |\\ | __/> <| |_| \\__ \\ | | | (_| | | | | | __/ | \r\n|_____/ \\___\\__,_|_| |_|\\___|\\__|_| \\_|\\___/_/\\_,\\____|___/_|_| \\__,_|_|_| |_|\\___|_| \r\n{g_logger.CEND}\"\"\")\r\n\r\ng_logger.logger(\"Waiting Game Window\")\r\n\r\nif not is_window_exist(\"ScarletNexus\"):\r\n input(\"Press Enter to When Game Is Started\")\r\n\r\npm = Pymem('ScarletNexus-Win64-Shipping.exe')\r\ng_logger.logger(f\"Process id: {hex(pm.process_id).upper()} Process Handle : {str(pm.process_handle)}\")\r\ng_logger.logger(\"Initializing Pointer\")\r\n\r\nclass pattern_scan():\r\n def __init__(self, name, signature) -> None:\r\n self.signature_scan(name, signature)\r\n\r\n def insert_string(self, string, str_to_insert, index):\r\n return string[:index] + str_to_insert + string[index:]\r\n\r\n def signature_scan(self, name, signature):\r\n self.first = signature\r\n self.second = self.insert_string(self.first, r\"\\x\", 0)\r\n self.third = self.second.replace(r\" \", r\"\\x\")\r\n self.final = self.third.replace(r\"\\x?\", r\".\")\r\n base = pymem.process.module_from_name(pm.process_handle, \"ScarletNexus-Win64-Shipping.exe\")#.lpBaseOfDll\r\n self.Pointer = pymem.pattern.pattern_scan_module(pm.process_handle, base, self.final.encode())\r\n self.SignatureName = name\r\n self.SigPattern = signature\r\n return self\r\n\r\n def add(self, offset):\r\n self.Pointer += offset\r\n return self\r\n\r\n def sub(self, offset):\r\n self.Pointer -= offset\r\n return self\r\n\r\n def rip(self):\r\n return self.add(pm.read_int(self.Pointer) + 4)\r\n\r\n def scan(self):\r\n try:\r\n g_logger.logger(f\"{self.SignatureName} : ScarletNexus-Win64-Shipping.exe+{hex(self.Pointer-pm.base_address).upper()}\")\r\n return self.Pointer\r\n except Exception as e:\r\n 
print(e)\r\n\r\n","sub_path":"memory_scanner.py","file_name":"memory_scanner.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"59564337","text":"\"\"\"\nThis test is similar to the one done by Grabowski and Clark (1991),\nfeaturing a moist thermal rising in an unsaturated atmosphere.\n\"\"\"\nfrom gusto import *\nfrom firedrake import (PeriodicIntervalMesh, ExtrudedMesh,\n SpatialCoordinate, conditional, cos, pi, sqrt, exp,\n TestFunction, dx, TrialFunction, Constant, Function,\n LinearVariationalProblem, LinearVariationalSolver, DirichletBC,\n FunctionSpace, BrokenElement, VectorFunctionSpace, errornorm)\nfrom firedrake.slope_limiter.vertex_based_limiter import VertexBasedLimiter\nimport sys\n\nif '--recovered' in sys.argv:\n recovered = True\nelse:\n recovered = False\n\nif '--diffusion' in sys.argv:\n diffusion = True\nelse:\n diffusion = False\n\ndt = 1.0\nif '--running-tests' in sys.argv:\n tmax = 10.\n deltax = 240.\nelse:\n deltax = 20. if recovered else 40.\n tmax = 600.\n\n\nL = 3600.\nh = 2400.\nnlayers = int(h/deltax)\nncolumns = int(L/deltax)\n\nm = PeriodicIntervalMesh(ncolumns, L)\nmesh = ExtrudedMesh(m, layers=nlayers, layer_height=h/nlayers)\ndegree = 0 if recovered else 1\n\ndirname = 'unsaturated_bubble'\nif recovered:\n dirname += '_recovered'\nif diffusion:\n dirname += '_diffusion'\n\nfieldlist = ['u', 'rho', 'theta']\ntimestepping = TimesteppingParameters(dt=dt, maxk=4, maxi=1)\noutput = OutputParameters(dirname=dirname, dumpfreq=20, dumplist=['u', 'rho', 'theta'],\n perturbation_fields=['theta', 'water_v', 'rho'], log_level='INFO')\nparams = CompressibleParameters()\ndiagnostics = Diagnostics(*fieldlist)\ndiagnostic_fields = [RelativeHumidity(), Theta_e()]\n\nstate = State(mesh, vertical_degree=degree, horizontal_degree=degree,\n family=\"CG\",\n timestepping=timestepping,\n output=output,\n parameters=params,\n diagnostics=diagnostics,\n fieldlist=fieldlist,\n diagnostic_fields=diagnostic_fields)\n\n# Initial conditions\nu0 = state.fields(\"u\")\nrho0 = state.fields(\"rho\")\ntheta0 = state.fields(\"theta\")\nwater_v0 = state.fields(\"water_v\", theta0.function_space())\nwater_c0 = state.fields(\"water_c\", theta0.function_space())\nrain0 = state.fields(\"rain\", theta0.function_space())\nmoisture = [\"water_v\", \"water_c\", \"rain\"]\n\n# spaces\nVu = u0.function_space()\nVt = theta0.function_space()\nVr = rho0.function_space()\nVt_brok = FunctionSpace(mesh, BrokenElement(Vt.ufl_element()))\nx, z = SpatialCoordinate(mesh)\nquadrature_degree = (4, 4)\ndxp = dx(degree=(quadrature_degree))\nphysics_boundary_method = None\n\nif recovered:\n VDG1 = state.spaces(\"DG1\")\n VCG1 = FunctionSpace(mesh, \"CG\", 1)\n Vu_DG1 = VectorFunctionSpace(mesh, VDG1.ufl_element())\n Vu_CG1 = VectorFunctionSpace(mesh, \"CG\", 1)\n\n u_opts = RecoveredOptions(embedding_space=Vu_DG1,\n recovered_space=Vu_CG1,\n broken_space=Vu,\n boundary_method=Boundary_Method.dynamics)\n rho_opts = RecoveredOptions(embedding_space=VDG1,\n recovered_space=VCG1,\n broken_space=Vr,\n boundary_method=Boundary_Method.dynamics)\n theta_opts = RecoveredOptions(embedding_space=VDG1,\n recovered_space=VCG1,\n broken_space=Vt_brok,\n boundary_method=Boundary_Method.dynamics)\n physics_boundary_method = Boundary_Method.physics\n\n# Define constant theta_e and water_t\nTsurf = 283.0\npsurf = 85000.\npi_surf = (psurf / state.parameters.p_0) ** state.parameters.kappa\nhumidity = 0.2\nS = 1.3e-5\ntheta_surf = 
thermodynamics.theta(state.parameters, Tsurf, psurf)\ntheta_d = Function(Vt).interpolate(theta_surf * exp(S*z))\nH = Function(Vt).assign(humidity)\n\n# Calculate hydrostatic fields\nunsaturated_hydrostatic_balance(state, theta_d, H, pi_boundary=Constant(pi_surf))\n\n# make mean fields\ntheta_b = Function(Vt).assign(theta0)\nrho_b = Function(Vr).assign(rho0)\nwater_vb = Function(Vt).assign(water_v0)\n\n# define perturbation to RH\nxc = L / 2\nzc = 800.\nr1 = 300.\nr2 = 200.\nr = sqrt((x - xc) ** 2 + (z - zc) ** 2)\n\nH_expr = conditional(r > r1, 0.0,\n conditional(r > r2,\n (1 - humidity) * cos(pi * (r - r2)\n / (2 * (r1 - r2))) ** 2,\n 1 - humidity))\nH_pert = Function(Vt).interpolate(H_expr)\nH.assign(H + H_pert)\n\n# now need to find perturbed rho, theta_vd and r_v\n# follow approach used in unsaturated hydrostatic setup\nrho_averaged = Function(Vt)\nrho_recoverer = Recoverer(rho0, rho_averaged, VDG=Vt_brok, boundary_method=physics_boundary_method)\nrho_h = Function(Vr)\nw_h = Function(Vt)\ndelta = 1.0\n\nR_d = state.parameters.R_d\nR_v = state.parameters.R_v\nepsilon = R_d / R_v\n\n# make expressions for determining water_v0\npie = thermodynamics.pi(state.parameters, rho_averaged, theta0)\np = thermodynamics.p(state.parameters, pie)\nT = thermodynamics.T(state.parameters, theta0, pie, water_v0)\nr_v_expr = thermodynamics.r_v(state.parameters, H, T, p)\n\n# make expressions to evaluate residual\npi_ev = thermodynamics.pi(state.parameters, rho_averaged, theta0)\np_ev = thermodynamics.p(state.parameters, pi_ev)\nT_ev = thermodynamics.T(state.parameters, theta0, pi_ev, water_v0)\nRH_ev = thermodynamics.RH(state.parameters, water_v0, T_ev, p_ev)\nRH = Function(Vt)\n\n# set-up rho problem to keep Pi constant\ngamma = TestFunction(Vr)\nrho_trial = TrialFunction(Vr)\na = gamma * rho_trial * dxp\nL = gamma * (rho_b * theta_b / theta0) * dxp\nrho_problem = LinearVariationalProblem(a, L, rho_h)\nrho_solver = LinearVariationalSolver(rho_problem)\n\nmax_outer_solve_count = 20\nmax_inner_solve_count = 10\n\nfor i in range(max_outer_solve_count):\n # calculate averaged rho\n rho_recoverer.project()\n\n RH.assign(RH_ev)\n if errornorm(RH, H) < 1e-10:\n break\n\n # first solve for r_v\n for j in range(max_inner_solve_count):\n w_h.interpolate(r_v_expr)\n water_v0.assign(water_v0 * (1 - delta) + delta * w_h)\n\n # compute theta_vd\n theta0.assign(theta_d * (1 + water_v0 / epsilon))\n\n # test quality of solution by re-evaluating expression\n RH.assign(RH_ev)\n if errornorm(RH, H) < 1e-10:\n break\n\n # now solve for rho with theta_vd and w_v guesses\n rho_solver.solve()\n\n # damp solution\n rho0.assign(rho0 * (1 - delta) + delta * rho_h)\n\n if i == max_outer_solve_count - 1:\n # reaching the final iteration without breaking means no convergence\n raise RuntimeError('Hydrostatic balance solve has not converged within %i iterations' % max_outer_solve_count)\n\nwater_c0.assign(0.0)\nrain0.assign(0.0)\n\n# initialise fields\nstate.initialise([('u', u0),\n ('rho', rho0),\n ('theta', theta0),\n ('water_v', water_v0),\n ('water_c', water_c0)])\nstate.set_reference_profiles([('rho', rho_b),\n ('theta', theta_b),\n ('water_v', water_vb)])\n\n# Set up advection schemes\nif recovered:\n ueqn = EmbeddedDGAdvection(state, Vu, equation_form=\"advective\", options=u_opts)\n rhoeqn = EmbeddedDGAdvection(state, Vr, equation_form=\"continuity\", options=rho_opts)\n thetaeqn = EmbeddedDGAdvection(state, Vt, equation_form=\"advective\", options=theta_opts)\n limiter = VertexBasedLimiter(VDG1)\nelse:\n ueqn = VectorInvariant(state, Vu)\n rhoeqn = AdvectionEquation(state, Vr, equation_form=\"continuity\")\n 
thetaeqn = EmbeddedDGAdvection(state, Vt, equation_form=\"advective\", options=EmbeddedDGOptions())\n limiter = ThetaLimiter(Vt)\n\nu_advection = ('u', SSPRK3(state, u0, ueqn)) if recovered else ('u', ThetaMethod(state, u0, ueqn))\n\nadvected_fields = [u_advection,\n ('rho', SSPRK3(state, rho0, rhoeqn)),\n ('theta', SSPRK3(state, theta0, thetaeqn)),\n ('water_v', SSPRK3(state, water_v0, thetaeqn, limiter=limiter)),\n ('water_c', SSPRK3(state, water_c0, thetaeqn, limiter=limiter)),\n ('rain', SSPRK3(state, rain0, thetaeqn, limiter=limiter))]\n\n# Set up linear solver\nlinear_solver = CompressibleSolver(state, moisture=moisture)\n\n# Set up forcing\ncompressible_forcing = CompressibleForcing(state, moisture=moisture)\n\n# diffusion\nbcs = [DirichletBC(Vu, 0.0, \"bottom\"),\n DirichletBC(Vu, 0.0, \"top\")]\n\ndiffused_fields = []\n\nif diffusion:\n diffused_fields.append(('u', InteriorPenalty(state, Vu, kappa=Constant(60.),\n mu=Constant(10./deltax), bcs=bcs)))\n\n# define condensation\nphysics_list = [Fallout(state), Coalescence(state), Evaporation(state), Condensation(state)]\n\n# build time stepper\nstepper = CrankNicolson(state, advected_fields, linear_solver,\n compressible_forcing, physics_list=physics_list,\n diffused_fields=diffused_fields)\n\nstepper.run(t=0, tmax=tmax)\n","sub_path":"examples/unsaturated_bubble.py","file_name":"unsaturated_bubble.py","file_ext":"py","file_size_in_byte":8963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"390799230","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 24 10:10:31 2016\n\n@author: Alexey\n\"\"\"\n\nhexDigits = \"0123456789ABCDEF\"\nencodesOneByte = \" aehilnorst\"\nENCODES = {'B': \"ABCDEFGHIJKLMNOP\",\n 'C': \"QRSTUVWXYZ012345\",\n 'D': \"6789bcdfgjkmpquv\",\n 'E': \"wxyz+-,!.?:;'*%$\"\n }\n\n\ndef nibbleToChar(b):\n i1 = (b&0xff) + ord('0');\n i2 = (b&0xff) + ord('A') - 10;\n i3 = (b&0xff) + ord('a') - 10;\n if i1 >= ord('0') and i1 <= ord('9'):\n return i1\n elif i2 >= ord('A') and i2 <= ord('F'):\n return i2\n elif i3 >= ord('a') and i3 <= ord('f'):\n return i3\n return ' '\n\n\ndef decodeBytesForStarsString(res):\n result = []\n hexChars = []\n num = []\n for i in xrange(len(res)):\n b = res[i]\n b1 = (b & 0xff) >> 4\n b2 = (b & 0xff) % 16\n hexChars.append(nibbleToChar(b1))\n hexChars.append(nibbleToChar(b2))\n num.extend([b1, b2])\n ipos = 0\n while ipos < len(hexChars):\n firstChar = chr(hexChars[ipos])\n if firstChar in ENCODES:\n ipos = ipos + 1\n b2 = num[ipos]\n result.append(ENCODES[firstChar][b2])\n elif firstChar != 'F':\n index = num[ipos]\n result.append(encodesOneByte[index])\n ipos = ipos + 1\n return ''.join(result)\n\n","sub_path":"util/strings.py","file_name":"strings.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"299209536","text":"from django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\nimport os\nmedia = os.path.join(\n os.path.dirname(__file__), 'media'\n)\nattach = os.path.join(\n os.path.dirname(__file__), 'attach'\n)\nurlpatterns = patterns('',\n # Examples:\n url(r'^$', 'main.views.welcome', name='welcome'),\n url(r'^about/$', 'main.views.about', name='about'),\n url(r'^brands/$', 'main.views.brands', name='brands'),\n url(r'^brand/(?P\\d+)/$', 'main.views.brand', name='brand'),\n url(r'^product/(?P\\d+)/$', 'main.views.product', 
name='product'),\n url(r'^sales/$', 'main.views.sales', name='sales'),\n url(r'^sales/(?P\\d+)/$', 'main.views.sales_city', name='sales_city'),\n url(r'^jobs/$', 'main.views.jobs', name='jobs'),\n url(r'^contacts/$', 'main.views.contacts', name='contact'),\n url(r'^knowledge/$', 'main.views.knowledge', name='knowledge'),\n # url(r'^TBES/', include('TBES.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n url(r'^media/(?P.*)$', 'django.views.static.serve', {'document_root': media }),\n url(r'^attach/(?P.*)$', 'django.views.static.serve', {'document_root': attach }),\n url(r'^static/(?P.*)$', 'django.views.static.serve', {'document_root': media }),\n)\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"620330784","text":"# This module contains functions for data analysis, likelihood and forecast\n\nimport numpy as np\n\n\n# aps amplitude histogram\nclass statistics:\n\n def __init__(self,ocl=1.,scl=1.):\n # ocl --- observed cl array like [bin]\n # scl --- simulated cl array like [sim,bin]\n self.ocl = ocl\n self.scl = scl\n self.A = 0.\n self.mA = 0.\n self.sA = 0.\n self.MA = 0.\n self.p = 0.\n self.px2 = 0.\n self.ox2 = 0.\n self.sx2 = 0.\n self.mx2 = 0.\n self.px1 = 0.\n self.ox1 = 0.\n self.sx1 = 0.\n self.mx1 = 0.\n self.onlydiag = False\n\n\n def x2PTE(self,diag=False,verbose=False):\n # compute chi^2 PTE of ocl using scl\n n = len(self.scl[:,0])\n # for real data\n mx = np.mean(self.scl,axis=0)\n dxi = self.scl - mx\n dx0 = self.ocl - mx\n cov = np.cov(dxi,rowvar=0)\n if diag: \n cov = np.diag(np.diag(cov))\n if verbose: print(dx0**2/np.diag(cov))\n oX2 = np.dot(dx0,np.dot(np.linalg.inv(cov),dx0))\n # for sim (exclude self rlz)\n dxi = np.array([self.scl[i,:]-np.mean(np.delete(self.scl,i,0),axis=0) for i in range(n)])\n sX2 = np.array([np.dot(dxi[i,:],np.dot(np.linalg.inv(np.cov(np.delete(dxi,i,0),rowvar=0)),dxi[i,:])) for i in range(n)])\n # output\n self.px2 = (sX2>oX2).sum()/np.float(n)\n self.ox2 = oX2\n self.sx2 = sX2\n self.mx2 = np.mean(sX2)\n\n\n def x1PTE(self,twoside=True):\n # compute chi PTE of ocl using scl\n n = len(self.scl[:,0])\n # for real data\n mx = np.mean(self.scl,axis=0)\n sx = np.std(self.scl,axis=0)\n oX1 = np.sum((self.ocl-mx)/sx)\n # for sim (exclude self rlz)\n dxi = np.array([self.scl[i,:]-np.mean(np.delete(self.scl,i,0),axis=0) for i in range(n)])\n sX1 = np.array([np.sum(dxi[i,:]/np.std(np.delete(self.scl,i,0),axis=0)) for i in range(n)])\n # output\n px1 = (sX1>oX1).sum()/np.float(n)\n if twoside:\n self.px1 = 1.-2*np.abs(px1-0.5)\n else:\n self.px1 = px1\n self.ox1 = oX1\n self.sx1 = sX1\n self.mx1 = np.mean(sX1)\n\n\n def get_amp(self,fcl=None,scale=1.,diag=False,cor='',twoside=True):\n \"\"\"\n Statistics of the amplitude of the power spectrum\n \n Args:\n - statistics\n\n Args(optional):\n - fcl: fiducial cl used to define amplitude array like [bin]\n \"\"\"\n\n # baseline cl (A=1)\n if fcl is None: \n Fcl = np.mean(self.scl,axis=0)*scale\n else:\n Fcl = fcl\n\n # relative amplitude\n amp = self.scl/Fcl\n\n # covariance\n cov = np.cov(amp,rowvar=0)\n cov[np.isnan(cov)] = 0.\n if diag: \n cov = np.diag(np.diag(cov))\n\n if cor!='': #(this was used for pbxhsc as one of tests)\n # replace |Cor|<0.5 elements with random 
numbers\n Cor = np.corrcoef(amp,rowvar=0)\n c = (np.kron(np.diag(cov),np.diag(cov).T)).reshape((len(fcl),len(fcl)))\n ran = np.random.rand(len(fcl),len(fcl))\n ran = np.tril(ran) \n ran = ran + ran.T -np.diag(ran.diagonal())\n xy = np.where(np.abs(Cor)<.5)\n cov[xy] += (.2*ran[xy]-.1)*c[xy]**0.5\n\n #//// observed amplitude ////#\n wb = np.sum( np.linalg.inv(cov), axis=0 )\n oA = np.sum( wb * self.ocl / Fcl ) / np.sum( wb )\n\n #//// amplitude estimator for sims ////#\n n = len(self.scl[:,0])\n\n # baseline cl\n if fcl is None:\n Fcl = np.array( [ np.mean(np.delete(self.scl,i,0),axis=0) for i in range(n) ] ) * scale\n else:\n Fcl = fcl\n\n # relative amplitude\n amp = self.scl/Fcl\n\n # coefficients\n if diag:\n wbi = np.array( [ np.sum( np.linalg.inv( np.diag(np.diag(np.cov(np.delete(amp,i,0),rowvar=0))) ),axis=0 ) for i in range(n) ] )\n else:\n wbi = np.array( [ np.sum( np.linalg.inv( np.cov(np.delete(amp,i,0),rowvar=0) ),axis=0 ) for i in range(n) ] )\n wti = np.array( [ np.sum(wbi[i,:]) for i in range(n)] )\n\n # amplitude estimates\n A = np.array( [np.sum(wbi[i,:]*amp[i,:])/wti[i] for i in range(n)] )\n mA = np.mean(A)\n sA = np.sqrt(np.var(A))\n\n # output\n self.A = A\n self.mA = mA\n self.sA = sA\n self.oA = oA\n self.p = (A>oA).sum()/np.float(len(A))\n self.MA = np.median(A)\n\n if twoside:\n self.p = 1.-2*np.abs(self.p-0.5)\n\n\n#////////////////////////#\n# Statistics from MC sim #\n#////////////////////////#\n\ndef PTEs(ocb,scb,diag=False,disp=True,x1pte=False,x2pte=True,fpt=2,comment=''):\n\n st = statistics(ocb,scb)\n\n form = '{:.'+str(fpt)+'f}'\n if comment != '':\n com = '('+comment+')'\n else:\n com = ''\n\n if x1pte:\n statistics.x1PTE(st)\n if disp:\n print('chi:',np.around(st.ox1,decimals=1),end=' ')\n print(', chi (sim mean):',np.around(st.mx1,decimals=1),end=' ')\n print(', chi (sim std):',np.around(np.std(st.sx1),decimals=1),end=' ')\n print(', PTE:',form.format(st.px1),com)\n\n if x2pte:\n statistics.x2PTE(st,diag)\n if disp:\n print('chi^2:',np.around(st.ox2,decimals=1),end=' ')\n print(', chi^2 (sim):',np.around(st.mx2,decimals=1),end=' ')\n print(', PTE:',form.format(st.px2),com)\n\n return st\n\n\ndef amplitude(ocb,scb,fcb=None,diag=False,disp=True):\n st = statistics(ocb,scb)\n statistics.get_amp(st,fcl=fcb,diag=diag)\n if disp:\n print('obs A', np.round(st.oA,3), 'mean(A)', np.round(st.mA,3), 'sigma(A)', np.round(st.sA,3), 'S/N', np.round(1./st.sA,3), 'A>oA', st.p)\n return st\n\n\n\ndef get_corrcoef(scl):\n \"\"\"\n Estimate correlation coefficients of the power spectrum\n \"\"\"\n\n corr = np.corrcoef(scl,rowvar=0)\n return corr\n\n\ndef get_cov(scl,fcl=None,scale=1.,diag=False,cinv=False):\n \"\"\"\n Compute covariance of the power spectrum\n\n Args:\n - statistics\n\n Args(optional):\n - fcl: fiducial cl used to define amplitude array like [bin]\n \"\"\"\n\n # mean\n if fcl is None: fcl = scale\n A = scl/fcl\n\n # covariance\n cov = np.cov(A,rowvar=0)\n cov[np.isnan(cov)] = 0.\n if diag: cov = np.diag(np.diag(cov))\n if cinv: cov = np.linalg.inv(cov)\n return cov\n\n\n# Optimal Combination\n\ndef combine(ocl0,ocl1,scl0,scl1,bnum):\n # combining two binned cl data at each multipole bin\n vcl0 = np.std(scl0,axis=0)\n vcl1 = np.std(scl1,axis=0)\n vclx = np.array( [ np.cov(scl0[:,b],scl1[:,b])[0,1] for b in range(bnum)] )\n g0 = vcl1**2 - vclx\n g1 = vcl0**2 - vclx\n vcl = (g0*ocl0+g1*ocl1) / (vcl0**2+vcl1**2-2*vclx)\n return vcl\n\n\ndef opt_weight(x,low=-1.,diag=False):\n # optimal weighting\n # x --- data like [sim,bin]\n cov = np.cov(x,rowvar=0)\n if 
diag: cov = np.diag(np.diag(cov)) # set off-diag to zero\n cov[np.isnan(cov)] = 0.\n if low > -1.: \n corr = get_corrcoef(x)\n cov[np.abs(corr)<low] = 0. # drop low-correlation elements (reconstructed; the source text was corrupted here)\n ci = np.linalg.inv(cov)\n return np.sum(ci,axis=0)/np.sum(ci) # inverse-covariance weights, as in get_amp (reconstructed return)\n\n\ndef apofunc(x):\n # apodization profile on [0,1] (header reconstructed; only the two lines below survived in the source)\n y = np.sqrt(x)\n return y - np.sin(2*np.pi*y)/(2*np.pi)\n\n\ndef apofunc_flat(mapsize,s,aposcale):\n # apodization window\n # mapsize --- unit in rad\n # s --- coordinates\n # aposcale --- scale of apodization in deg\n a = mapsize*.5\n ss = abs(s)/a\n x = (1.-ss)/(1.-aposcale)\n x[x>=1.] = 1.\n x[x<=0.] = 0.\n return x - np.sin(2*np.pi*x)/(2*np.pi)\n\n\ndef window_2d(nx,ny,Dx,Dy,aposcale):\n sx = Dx/nx\n sy = Dy/ny\n xi = (np.linspace(0,nx,nx)-1.-nx*0.5)*sx\n xj = (np.linspace(0,ny,ny)-1.-ny*0.5)*sy\n Wx = apofunc_flat(Dx,xi,aposcale)\n Wy = apofunc_flat(Dy,xj,aposcale)\n return np.outer(Wx,Wy)\n\n\n# Absolute Angle Estimator\n\ndef est_absangle(oCX,sCX,oCY,sCY,fcl=1.,disp=True,diag=False,x1pte=False,x2pte=True):\n\n # estimate amplitude of the cross spectrum\n ocl = oCX/(oCY*2*np.pi/180.)\n scl = sCX/(sCY*2*np.pi/180.)\n st = statistics(ocl,scl)\n statistics.get_amp(st,fcl,diag=diag)\n\n # check PTE of observed cls\n if x1pte:\n statistics.x1PTE(st)\n print('x-PTEs of the spectrum ratio:',np.around(st.px1,decimals=3))\n if x2pte:\n statistics.x2PTE(st,diag=diag)\n print('x^2-PTEs of the spectrum ratio:',np.around(st.px2,decimals=3))\n\n return st\n\n\n\n#////////////////////#\n# Likelihood\n#////////////////////#\n\ndef lnLHL(rx,fcl,icov,bi=None):\n # rx = ocb/scb\n # icov is the covariance of ocb\n bn, bn = np.shape(icov)\n if bi is None:\n gx = np.sign(rx-1.)*np.sqrt(2.*(rx-np.log(rx)-1.))\n else:\n gx = np.zeros(bn)\n gx[bi] = np.sign(rx[bi]-1.)*np.sqrt(2.*(rx[bi]-np.log(rx[bi])-1.))\n return -0.5*np.dot(gx*fcl,np.dot(icov,gx*fcl))\n\n\ndef lnLHLs(rx,fcl,icov,bi=None):\n # rx = ocb/scb\n # icov is the covariance of ocb\n bn, bn = np.shape(icov)\n gx = np.sign(rx-1.)*np.sqrt(2.*(rx-np.log(rx)-1.))\n return -0.5*gx*fcl[bi]*icov[bi,bi]*gx*fcl[bi]\n\n\n# general Gaussian function\ndef lnL_gauss(scov,ocov,method=0):\n \n lmax = len(scov[:,0,0]) - 1\n \n if method == 0:\n scov_i = scov.copy()\n scov_i[2:,:,:] = np.array( [ np.linalg.inv(scov[l,:,:]) for l in range(2,lmax+1) ] )\n Bmat = np.array( [ np.dot( ocov[l,:,:], scov_i[l,:,:] ) for l in range(lmax+1) ] )\n return np.array( [ (2*l+1)*(np.trace(Bmat[l,:,:])-np.log(np.linalg.det(Bmat[l,:,:]))-2.) 
for l in range(2,lmax+1) ] )\n \n elif method == 1:\n import scipy.linalg as LA\n scov_isq = scov.copy()\n scov_isq[2:,:,:] = np.array( [ LA.inv(LA.sqrtm(scov[l])) for l in range(2,lmax+1) ] )\n Cmat = np.array( [ np.dot( scov_isq[l] , np.dot( ocov[l], scov_isq[l] ) ) for l in range(lmax+1) ] )\n Dii = scov[:,:,0].copy()\n Dii[2:,:] = np.array( [ np.linalg.eigh(Cmat[l])[0] for l in range(2,lmax+1) ] ) \n return np.array( [ (2*l+1)*(np.sum(Dii[l]-np.log(Dii[l]))-len(Dii[l])) for l in range(2,lmax+1) ] )\n\n\n#////////////////////#\n# Fisher matrix\n#////////////////////#\n\n\ndef Fisher_Matrix(L,dCdp=None,iC=None,dlnCdp=None,fsky=1.):\n\n # return fisher matrix\n if dlnCdp is not None:\n s1, s2, ln, pn = dlnCdp.shape\n elif dCdp is not None and iC is not None:\n s1, s2, ln, pn = dCdp.shape\n else:\n print('need either (dlnCdp) or (dCdp and iC)')\n F = np.zeros((pn,pn,ln))\n \n # symmetric in pn x pn\n for i in range(pn):\n for j in range(i,pn):\n if dlnCdp is not None:\n F[i,j,:] = np.array( [ fsky*(L[l]+.5) * np.trace( np.dot(dlnCdp[:,:,l,i],dlnCdp[:,:,l,j]) ) for l in range(ln) ] )\n elif dCdp is not None and iC is not None:\n F[i,j,:] = np.array( [ fsky*(L[l]+.5) * np.trace( np.dot(np.dot(iC[:,:,l],dCdp[:,:,l,i]),np.dot(iC[:,:,l],dCdp[:,:,l,j])) ) for l in range(ln) ] )\n else:\n print('need either (dlnCdp) or (dCdp and iC)')\n F[j,i,:] = F[i,j,:]\n return F\n\n\ndef Fisher_2Dcontour(F,i=0,j=1,display=False):\n \n invF = np.linalg.inv(F)\n \n subF = np.zeros((2,2))\n subF[0,0] = invF[i,i]\n subF[1,1] = invF[j,j]\n subF[0,1] = invF[i,j]\n subF[1,0] = invF[j,i]\n \n lam, v = np.linalg.eig(subF)\n lam = np.sqrt(lam)*1.516575089\n phi = np.rad2deg(np.arctan2(v[1,0],v[0,0]))\n \n if display: print(lam,phi)\n \n return lam, phi\n\n\n\n","sub_path":"utils/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":11837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"398310981","text":"\"\"\"\nStringy Strings\n\nhttp://www.codewars.com/kata/stringy-strings\n\nWrite a function stringy that takes a size\nand returns a string of alternating '1s' and '0s'.\nthe string should start with a 1.\n\na string with size 6 should return :'101010'.\nwith size 4 should return : '1010'.\nwith size 12 should return : '101010101010'.\n\nThe size will always be positive and will only use whole numbers.\n\"\"\"\n\n\ndef stringy(size):\n result = ''\n queue = '10'\n\n for _ in range(size):\n result += queue[0]\n queue = queue[::-1]\n\n return result\n\n\nif __name__ == '__main__':\n print(stringy(3) == '101')\n print(stringy(5) == '10101')\n print(stringy(12) == '101010101010')\n","sub_path":"8_kyu/stringy_strings.py","file_name":"stringy_strings.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"102570204","text":"import shutil\nimport sys\nimport os\nimport re\n\nAPP_PACKAGE_NAME = \"com.plugtester\"\nINFO = \"[ ODR] UA POST BUILD HOOK - info @ petehobson.com/contact\"\n\nprint (INFO)\n\ndef build_android():\n \"\"\"\n android requires:\n - a properties file in the app root\n - values in manifest replacing to your package name NB change com.plugtester to your package\n - shim app class in manifest\n \"\"\"\n fpath = os.path.dirname(os.path.abspath(__file__))\n shutil.copy2(fpath + '/uaplugin/airshipconfig.properties', 'android/assets/airshipconfig.properties')\n shutil.copy2(fpath + '/uaplugin/location.properties', 
'android/assets/location.properties')\n\n with open(\"android/AndroidManifest.xml\", \"r\") as myfile:\n data = myfile.read()\n with open(\"android/AndroidManifest.xml\", \"w\") as myfile:\n patched = re.sub(r'(ODR_PACKAGE_TOKEN)', APP_PACKAGE_NAME, data)\n patched = re.sub(r'(io.trigger.forge.android.core.ForgeApp)',\n \"io.trigger.forge.android.modules.urbanairship.UAShim\", patched)\n\n myfile.write(patched)\n\ndef build_ios():\n \"\"\"\n ios requires:\n - plist config file in simulator and device packages\n \"\"\"\n fpath = os.path.dirname(os.path.abspath(__file__))\n shutil.copy2(fpath + '/uaplugin/AirshipConfig.plist', 'ios/simulator-ios.app/AirshipConfig.plist')\n shutil.copy2(fpath + '/uaplugin/AirshipConfig.plist', 'ios/device-ios.app/AirshipConfig.plist')\n\n\nif (sys.argv[1] == 'android'):\n build_android()\n\nif (sys.argv[1] == 'ios'):\n build_ios()\n","sub_path":"urbanairship_demoapp/hooks/postbuild/hook.py","file_name":"hook.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"643188108","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport boto3\n\ndef handler(event, context):\n if 'menu_id' not in event:\n raise Exception('Error: Menu id required')\n\n # Get the service resource.\n try:\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('PizzaMenu')\n response = table.delete_item(\n Key = {\n 'menu_id': event['menu_id']\n }\n )\n except Exception as e:\n return \"Error deleting the menu: %s\" % e.message\n return \"OK\"\n","sub_path":"Assignment2/PizzaMenuDelete/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"345508653","text":"import torch\nimport torch.nn as nn\nfrom torchvision.transforms import transforms\nimport numpy as np\nfrom torch.autograd import Variable\nfrom torchvision.models import squeezenet1_1\nimport torch.functional as F\nfrom io import open\nimport os\nfrom PIL import Image\nimport pathlib\nimport glob\n\ntrain_path = 'D:/李佳明/北印/grad_des_proj/grad_des_proj/image_train'\npred_path = 'D:/李佳明/北印/grad_des_proj/grad_des_proj/image_pred'\n\nroot = pathlib.Path(train_path)\nclasses = sorted(j.name.split('/')[-1] for j in root.iterdir())\nprint(classes)\n\nclass ConvNet(nn.Module):\n def __init__(self, num_classes=2):\n super(ConvNet, self).__init__()\n\n # output size after convolution filter\n #((w - f + 2P) / s) + 1\n\n # input shape = (256, 3, 150, 150)\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=12, kernel_size=3, stride=1, padding=1)\n #Shape = (256, 12, 150, 150)\n self.bn1 = nn.BatchNorm2d(num_features=12)\n # Shape = (256, 12, 150, 150)\n self.relu1 = nn.ReLU()\n # Shape = (256, 12, 150, 150)\n\n self.pool = nn.MaxPool2d(kernel_size = 2)\n\n #Reduce the image size by factor 2\n #Shape = (256, 12, 75, 75)\n self.conv2 = nn.Conv2d(in_channels=12, out_channels=20, kernel_size=3, stride=1, padding=1)\n # Shape = (256, 20, 75, 75)\n self.relu2 = nn.ReLU()\n # Shape = (256, 20, 75, 75)\n\n self.conv3 = nn.Conv2d(in_channels=20, out_channels=32, kernel_size=3, stride=1, padding=1)\n # Shape = (256, 32, 75, 75)\n self.bn3 = nn.BatchNorm2d(num_features=32)\n # Shape = (256, 32, 75, 75)\n self.relu3 = nn.ReLU()\n # Shape = (256, 32, 75, 75)\n\n self.fc = nn.Linear(in_features = 32*75*75, out_features = num_classes)\n\n #Feed forward function\n def forward(self, *input):\n 
output = self.conv1(input[0])\n output = self.bn1(output)\n output = self.relu1(output)\n\n output = self.pool(output)\n\n output = self.conv2(output)\n output = self.relu2(output)\n\n output = self.conv3(output)\n output = self.bn3(output)\n output = self.relu3(output)\n\n #Above output will be in matrix form, with shape (256, 32, 75, 75)\n output = output.view(-1, 32 * 75 * 75)\n\n output = self.fc(output)\n\n return output\n\ncheckpoint = torch.load('best_checkpoint7.model')\nmodel = ConvNet(num_classes = 2)\nmodel.load_state_dict(checkpoint)\nmodel.eval()\n\n#Transforms\ntransformer = transforms.Compose([\n transforms.Resize((150, 150)),\n # transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.5,0.5,0.5], #0-1 to [-1, 1], formula (x - mean) / std\n [0.5,0.5,0.5])\n])\n\n#Prediction function\ndef prediction(img_path, transformer):\n image = Image.open(img_path).convert('RGB')\n image_tensor = transformer(image).float()\n image_tensor = image_tensor.unsqueeze_(0)\n\n input = Variable(image_tensor)\n output = model(input)\n\n index = output.data.numpy().argmax()\n pred = classes[index]\n\n return pred\n\nimages_path = glob.glob(pred_path + '/*/*.jpg')\n\npred_dict = {}\npred_accuracy_list = []\n#pred_accuracy = 0.0\nfor i in images_path:\n res = prediction(i, transformer)\n pred_dict[i[i.rfind('/') + 1:]] = res\n\n a = os.path.dirname(i) # get the directory part of the file path first\n #print(a)\n image_name = os.path.basename(a) # read the name of the last folder from the file path\n if str(res) == str(image_name):\n pred_accuracy_list.append(1)\n else:\n pred_accuracy_list.append(0)\n print('Model prediction: ', res, '; actual drone model: ', image_name)\n\npred_accuracy = np.sum(pred_accuracy_list) / len(pred_accuracy_list)\nprint('Prediction accuracy: ', pred_accuracy)\n\n#print(pred_dict)\n\n","sub_path":"best_model_prediction.py","file_name":"best_model_prediction.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"98098864","text":"from Turtle import Turtle\r\n\r\ndef A_2(size):\r\n sides = 9\r\n\r\n imagine = Turtle(size/2, 0)\r\n\r\n imagine.penUp()\r\n imagine.setAngle(180)\r\n imagine.right(10)\r\n imagine.penDown()\r\n\r\n for x in xrange(sides):\r\n imagine.forward(size)\r\n imagine.left((360./sides) * 4)\r\n\r\n imagine.export(\"A_2.svg\")\r\n\r\nA_2(200)\r\n","sub_path":"IV122/03/A_2.py","file_name":"A_2.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"647048073","text":"# max of 20 integers and its position\r\nmax=int(input('enter the number a1 : \\n'))\r\npos=1\r\nfor i in range(2,21):\r\n print('enter the number a',i,' : ',sep=\"\",end=\"\")\r\n a=int(input('\\n'))\r\n if a>max:\r\n max=a\r\n pos=i\r\nprint()\r\nprint('the max is :',max)\r\nprint('the position is :', pos)\r\ninput()\r\n","sub_path":"TP/TP2/Exo2.py","file_name":"Exo2.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"303372139","text":"from functools import lru_cache\n\n'''\n Example of caching for better performance with the Fibonacci algorithm;\n you can test performance by timing sum(10, 31) and sum(25, 42)\n'''\n@lru_cache()\ndef fibo(n):\n if n == 1 or n == 2:\n return 1\n return fibo(n - 1) + fibo(n - 2)\n\ndef sum(start, stop):\n result = 0\n for n in range(start, stop):\n result += fibo(n)\n return result\n\nif __name__ == '__main__':\n print(sum(10, 31))\n print(sum(25, 
42))","sub_path":"cache/fibo.py","file_name":"fibo.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"652538427","text":"# Copyright 2021, Yahoo\n# Licensed under the terms of the Apache 2.0 license. See the LICENSE file in the project root for terms\nfrom queue import LifoQueue\nfrom random import randint\nfrom unittest import TestCase\n\nfrom ychaos.agents.agent import Agent, AgentConfig, AgentPriority, AgentState\nfrom ychaos.agents.exceptions import AgentError\n\n\nclass MockAgentConfig(AgentConfig):\n name = \"mock_agent\"\n description = \"This is a mock agent used for testing\"\n priority = AgentPriority.VERY_LOW_PRIORITY\n\n\nclass MockAgent(Agent):\n def monitor(self) -> LifoQueue:\n super(MockAgent, self).monitor()\n self._status.put(randint(10, 20))\n\n def setup(self) -> None:\n super(MockAgent, self).setup()\n print(f\"Running {self.__class__.__name__} setup method\")\n\n def run(self) -> None:\n super(MockAgent, self).run()\n print(f\"Running {self.__class__.__name__} run method\")\n\n def teardown(self) -> None:\n super(MockAgent, self).teardown()\n print(f\"Running {self.__class__.__name__} teardown method\")\n\n\nclass MockAgentTeardownRaisesError(MockAgent):\n def teardown(self) -> None:\n super(MockAgentTeardownRaisesError, self).teardown()\n raise Exception(\"Some Error occurred\")\n\n\nclass TestBaseAgent(TestCase):\n def setUp(self) -> None:\n self.mock_agent_config = MockAgentConfig()\n\n def test_agent_setup(self):\n agent = MockAgent(self.mock_agent_config.copy())\n self.assertEqual(agent.current_state, AgentState.INIT)\n\n agent.setup()\n self.assertEqual(agent.current_state, AgentState.SETUP)\n\n def test_agent_run_when_state_matches(self):\n agent = MockAgent(self.mock_agent_config.copy())\n self.assertEqual(agent.current_state, AgentState.INIT)\n\n agent.advance_state(AgentState.SETUP)\n agent.run()\n self.assertEqual(agent.current_state, AgentState.RUNNING)\n\n def test_agent_run_when_state_mismatch_does_not_raise_error_from_config(self):\n agent_config_copy = self.mock_agent_config.copy()\n agent_config_copy.raise_on_state_mismatch = False\n\n agent = MockAgent(agent_config_copy)\n self.assertEqual(agent.current_state, AgentState.INIT)\n\n agent.run()\n self.assertEqual(agent.current_state, AgentState.RUNNING)\n\n def test_agent_run_when_state_mismatch_raises_error_from_config(self):\n agent = MockAgent(self.mock_agent_config.copy())\n self.assertEqual(agent.current_state, AgentState.INIT)\n\n with self.assertRaises(AgentError):\n agent.run()\n\n self.assertEqual(agent.current_state, AgentState.ABORTED)\n\n def test_agent_teardown_async_when_teardown_raises_error(self):\n agent = MockAgentTeardownRaisesError(self.mock_agent_config.copy())\n self.assertEqual(agent.current_state, AgentState.INIT)\n\n t = agent.teardown_async()\n t.join()\n\n self.assertEqual(agent.current_state, AgentState.ERROR)\n\n def test_agent_run_when_is_not_runnable_raises_error(self):\n agent = MockAgent(self.mock_agent_config.copy())\n agent.exception.put(Exception(\"Error\"))\n\n with self.assertRaises(AgentError):\n agent.run()\n\n self.assertEqual(agent.current_state, AgentState.ABORTED)\n","sub_path":"tests/agents/test_agent.py","file_name":"test_agent.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"404142482","text":"import os\nimport re\n\nfrom setuptools import setup, 
find_packages\n\n\ndef read_version(version_file_name):\n \"\"\"\n Reads the package version from the supplied file\n \"\"\"\n version_file = open(os.path.join(version_file_name)).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", version_file).group(1)\n\nname = 'mental'\nversion = read_version(os.path.join(name,'__init__.py'))\n\nsetup(\n name=name,\n version=version,\n description='Mental poker implementation',\n url='https://github.com/colonelmo/mental-poker',\n download_url='https://github.com/colonelmo/mental/archive/0.1.0.tar.gz',\n author='Mohammad Nasirifar',\n author_email='far.nasiri.m@gmail.com',\n license='BSD',\n keywords='mental poker rsa cryptography',\n packages=find_packages(exclude=[\"*.tests\", \"*.tests.*\", \"tests.*\", \"tests\"]),\n include_package_data=True,\n zip_safe=True,\n install_requires = [],\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n ],\n)\n","sub_path":"pypi_install_script/mental-0.2.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"627534071","text":"import datetime\nfrom urllib.error import URLError\nfrom bs4 import BeautifulSoup\nimport urllib\nfrom urllib import request\nfrom requests import Session\nfrom core.abstract import Record\n\nalpha = 'abcdefghijklmnopqrstuvwxyz'.upper()\n\n\nschedule_headers_map = {\n 'title': 'Original Title', 'date': 'Date', 'channel': 'Channel',\n 'start_time': 'Start Time', 'stop_time': 'Stop Time', 'original_title': 'Original Title',\n 'yop': 'Year of production', 'season': 'Season', 'seasons': 'Seasons', 'total_no_of_ep': 'Total number of episodes',\n 'cop': 'Countries of production ', 'genre': 'Genre', 'subgenre': 'Sub Genre', 'broadcast_lang': 'Broadcast language',\n 'type': 'Series/Movie', 'id': 'ID', 'cast': 'Cast', 'episode': 'Episode'\n}\n\n\"\"\" Maps instance variables to formatted Headers \"\"\"\n\nseries_headers_map = {\n 'title': 'Title', 'cast': 'actors', 'language': 'Broadcast language', 'episode': 'Episode',\n 'season': 'Season', 'type': 'Series/Movie', 'yop': 'Year of Production', 'season_year': 'Season Year'\n}\n\n\nmovie_summary_headers = {\n 'title': 'Title', 'yop': 'Production Date', 'language': 'Language', 'cast': 'Actors', 'type': 'Movie/Series'\n}\n\nseries_summary_headers = {\n 'title': 'Series Name', 'season': 'Season', 'season_year': 'Season year', 'count': 'No. 
of Episodes',\n 'type': 'Movie/Series'\n}\n\ndriver = None\n\n\ndef print_record_sched(record: Record):\n _print_record_fields(record, schedule_headers_map, 'schedule')\n\n\ndef print_record_summary(record: Record):\n _print_record_fields(record, series_headers_map, 'summary')\n\n\ndef _print_record_fields(record: Record, field_map: {}, type_name: str):\n for k, v in vars(record).items():\n if k in field_map.keys() and len(v):\n print(f\"Set {field_map[k]} : {v} ({type_name})\")\n print(\"\\n\")\n\n\ndef get_current_date_string() -> str:\n n = datetime.datetime.now()\n return n.strftime(\"%m_%d_%y__%H%M\")\n\n\ndef make_soup(input_url):\n try:\n html_headers = {'User-Agent': 'Mozilla/5.0'\n 'KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n 'Accept-Encoding': 'none',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'X-Requested-With': 'XMLHttpRequest',\n 'Connection': 'keep-alive'}\n req = urllib.request.Request(input_url, headers=html_headers)\n return BeautifulSoup(urllib.request.urlopen(req).read(), \"lxml\")\n except UnicodeEncodeError as e:\n r = Session().get(url=input_url)\n return BeautifulSoup(r.content, \"lxml\")\n except URLError as e:\n return BeautifulSoup('', features='lxml')\n\n\n\n","sub_path":"util/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"210448898","text":"import abc\nimport asyncio\nimport json\nimport logging\nfrom multiprocessing import Process\nfrom multiprocessing import Queue\nfrom queue import Empty\nfrom typing import Collection\nfrom typing import Iterator\nfrom typing import Mapping\nfrom typing import TYPE_CHECKING\n\nimport requests\nimport transitions.extensions\nfrom mypy_extensions import TypedDict\n\nfrom paasta_tools.slack import get_slack_client\ntry:\n from scribereader import scribereader\nexcept ImportError:\n scribereader = None\n\nSLACK_WEBHOOK_STREAM = 'stream_slack_incoming_webhook'\nSCRIBE_ENV = 'uswest1-prod'\nlog = logging.getLogger(__name__)\n\n\ndef get_slack_blocks_for_initial_deployment(message, last_action=None, status=None, active_button=None):\n blocks = [\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": message,\n },\n },\n {\"type\": \"divider\"},\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": f\"Status: {status}\\nLast action: {last_action}\",\n },\n },\n {\n \"type\": \"actions\",\n \"block_id\": \"deployment_actions\",\n \"elements\": get_button_elements([\"rollback\", \"forward\"], active_button=active_button),\n },\n ]\n return blocks\n\n\ndef get_button_element(button, is_active):\n active_button_texts = {\n \"rollback\": \"Rolling Back :zombocom: (Not Impl.)\",\n \"forward\": \"Rolling Forward :zombocom: (Not Impl.)\",\n }\n\n inactive_button_texts = {\n \"rollback\": \"Roll Back :arrow_backward: (Not Impl.)\",\n \"forward\": \"Continue Forward :arrow_forward: (Not Impl.)\",\n }\n\n if is_active is True:\n confirm = False\n text = active_button_texts[button]\n else:\n confirm = get_confirmation_object(button)\n text = inactive_button_texts[button]\n\n element = {\n \"type\": \"button\",\n \"text\": {\n \"type\": \"plain_text\",\n \"text\": text,\n \"emoji\": True,\n },\n \"confirm\": confirm,\n \"value\": button,\n }\n if not confirm:\n del element[\"confirm\"]\n return element\n\n\ndef 
get_button_elements(buttons, active_button=None):\n elements = []\n for button in buttons:\n is_active = button == active_button\n elements.append(\n get_button_element(button=button, is_active=is_active),\n )\n return elements\n\n\ndef get_confirmation_object(action):\n return {\n \"title\": {\n \"type\": \"plain_text\",\n \"text\": \"Are you sure?\",\n },\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": f\"Did you mean to press {action}?\",\n },\n \"confirm\": {\n \"type\": \"plain_text\",\n \"text\": \"Yes. Do it!\",\n },\n \"deny\": {\n \"type\": \"plain_text\",\n \"text\": \"Stop, I've changed my mind!\",\n },\n }\n\n\nclass ButtonPress():\n def __init__(self, event):\n self.event = event\n self.username = event[\"user\"][\"username\"]\n self.response_url = event[\"response_url\"]\n # TODO: Handle multiple actions?\n self.action = event[\"actions\"][0][\"value\"]\n self.thread_ts = event[\"container\"][\"thread_ts\"]\n self.channel = event[\"channel\"][\"name\"]\n\n def __repr__(self):\n return self.event\n\n def update(self, blocks):\n # Implements responding to button presses\n # https://api.slack.com/messaging/interactivity/enabling#responding-to-interactions\n # But isn't the api_call method per-se\n # https://github.com/slackapi/python-slackclient/issues/270\n requests.post(self.response_url, json={\"blocks\": blocks})\n\n\ndef event_to_buttonpress(event):\n return ButtonPress(event=event)\n\n\ndef parse_webhook_event_json(line):\n event = json.loads(line)\n log.debug(event)\n return event\n\n\ndef is_relevant_event(event):\n # TODO: Implement filtering\n return True\n\n\ndef get_slack_events():\n if scribereader is None:\n return\n\n def scribe_tail(queue):\n host_and_port = scribereader.get_env_scribe_host(SCRIBE_ENV, True)\n host = host_and_port['host']\n port = host_and_port['port']\n tailer = scribereader.get_stream_tailer(SLACK_WEBHOOK_STREAM, host, port)\n for line in tailer:\n queue.put(line)\n\n # Tailing scribe is not thread-safe, therefore we must use a Multiprocess-Queue-based\n # approach, with paasta logs as prior art.\n queue = Queue()\n kw = {'queue': queue}\n process = Process(target=scribe_tail, kwargs=kw)\n process.start()\n while True:\n try:\n line = queue.get(block=True, timeout=0.1)\n event = parse_webhook_event_json(line)\n if is_relevant_event(event):\n yield line\n except Empty:\n pass\n\n\ndef watch_for_slack_webhooks(sc):\n for event in get_slack_events():\n buttonpress = event_to_buttonpress(event)\n followup_message = f\"Got it. 
{buttonpress.username} pressed {buttonpress.action}\"\n sc.post(channels=[buttonpress.channel], message=followup_message, thread_ts=buttonpress.thread_ts)\n action = buttonpress.action\n blocks = get_slack_blocks_for_initial_deployment(\n message=\"New Message\", last_action=action, status=f\"Taking action on the {action} button\",\n active_button=action,\n )\n buttonpress.update(blocks)\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n sc = get_slack_client()\n watch_for_slack_webhooks(sc)\n\n\nclass TransitionDefinition(TypedDict):\n trigger: str\n source: str\n dest: str\n\n\nclass DeploymentProcess(abc.ABC):\n if TYPE_CHECKING:\n # These attributes need to be defined in this `if TYPE_CHECKING` block, because if they exist at runtime then\n # transitions will refuse to overwrite them.\n state: str\n\n def trigger(self, *args, **kwargs):\n ...\n\n def __init__(\n self,\n ):\n\n self.event_loop = asyncio.get_event_loop()\n self.finished_event = asyncio.Event(loop=self.event_loop)\n\n self.machine = transitions.extensions.LockedMachine(\n model=self,\n states=list(self.states()),\n transitions=list(self.valid_transitions()),\n initial=self.start_state(),\n after_state_change=self.after_state_change,\n queued=True,\n )\n\n @abc.abstractmethod\n def status_code_by_state(self) -> Mapping[str, int]:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def states(self) -> Collection['str']:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def valid_transitions(self) -> Iterator[TransitionDefinition]:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def start_transition(self):\n raise NotImplementedError()\n\n @abc.abstractmethod\n def start_state(self):\n raise NotImplementedError()\n\n def finish(self):\n self.finished_event.set()\n\n def run(self):\n return self.event_loop.run_until_complete(self.run_async())\n\n async def run_async(self) -> int:\n self.trigger(self.start_transition())\n await self.finished_event.wait()\n return self.status_code_by_state().get(self.state, 3)\n\n def after_state_change(self):\n if self.state in self.status_code_by_state():\n self.event_loop.call_soon_threadsafe(self.finished_event.set)\n","sub_path":"paasta_tools/automatic_rollbacks.py","file_name":"automatic_rollbacks.py","file_ext":"py","file_size_in_byte":7490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"426821167","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom math import cos,pi,exp\na=0#float(input(\"donner la valeur de a =\"))\nb=1#float(input(\"donner la valeur de b =\"))\nN=10#int(input(\"donner la valeur de N =\"))\nh=(b-a)/(N+1)\nc=1\nT=h/c\nep=0.005\nr=1/(exp(1/ep)-1)\nexact = lambda x: -r+r*exp(x/ep)\nx=np.linspace(a,b,N+1)\nf=[exact(e) for e in x]\nL=np.zeros([N+1,N+1])\nfor i in range(N): \n L[i][i+1]=1/2\n L[i+1][i]=-1/2\nL[0][0]=-1/2 \nL[N][N]=1/2 \nS=np.zeros([N+1,N+1])\nfor i in range(N):\n S[i][i]=2/h\nfor i in range(N): \n S[i][i+1]=-1/h\n S[i+1][i]=-1/h \nS[0][0]=1/h \nS[N][N]=1/h\nS1=np.zeros((N+1,1))\nS2=np.zeros((N+1,1))\nG=np.dot(ep,S)+c*L\nG[0,:]=0\nG[0,0]=1\nG[N,:]=0\nG[N,N]=1\nS1[0]=0\nS1[N]=1\nfnew=list(np.linalg.solve(G,S1))\nG2=np.dot(ep+T*c*c,S)+c*L\nG2[0,:]=0\nG2[0,0]=1\nG2[N,:]=0\nG2[N,N]=1\nS2[0]=0\nS2[N]=1\nfnew2=list(np.linalg.solve(G2,S2))\nplt.plot(x,fnew,label='G_standar')\nplt.plot(x,fnew2,label='SUPG')\nplt.plot(x,f,label=' Exacte')\nplt.legend()\nplt.grid()\n \n \n \n \n \n \n \n 
","sub_path":"cov_diff.py","file_name":"cov_diff.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"561963134","text":"class Solution(object):\n def maximalRectangle(self, matrix):\n \"\"\"\n :type matrix: List[List[str]]\n :rtype: int\n \"\"\"\n n = len(matrix)\n if n == 0:\n return 0\n m = len(matrix[0])\n y = 0\n x = 0\n Hs = [[0 for _ in range(0, m)] for _ in range(0, n)]\n maxA = 0\n while y < n:\n stack = [(-1,0)]\n lastH = 0\n while x < m:\n crtH = 1 + (0 if y == 0 else Hs[y-1][x]) if matrix[y][x] == '1' else 0\n Hs[y][x] = crtH\n index = x\n last = stack[-1]\n while crtH < last[1]:\n w = x - last[0]\n maxA = max(maxA, w * last[1])\n stack.pop()\n index = last[0]\n last = stack[-1]\n stack.append((index, crtH))\n\n x += 1\n while len(stack) > 0:\n last = stack.pop()\n maxA = max((x - last[0]) * last[1], maxA)\n x = 0\n y += 1\n return maxA\n\n\n# test\nfrom random import randint\nimport json\ns = Solution()\nn = 1\nnMax = 10\nmMax = 10\nfor _ in range(0, n):\n h = 500 #randint(0, nMax)\n w = 500 #randint(0, mMax)\n test = [\"\".join([str(randint(0, 1)) for _ in range(0, w)]) for _ in range(0, h)]\n # sample solution needs list with double quotes. silly :-/\n print(json.dumps(test))\n print(s.maximalRectangle(test))","sub_path":"85_maximalRectangle.py","file_name":"85_maximalRectangle.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"158605818","text":"from tkinter import *\nimport datetime\nfrom addpeople import AddPeople\nfrom mypeople import MyPeople\n\n\ndate = datetime.datetime.now().date()\ndate = str(date)\n\n\nclass Application(object):\n def __init__(self, master):\n self.master = master\n # frame\n self.top = Frame(master, height=150, bg='white')\n self.top.pack(fill=X)\n self.bottom = Frame(master, height=500, bg='#0766b5')\n self.bottom.pack(fill=X)\n\n # top frame design\n self.top_image = PhotoImage(file='icons/phone-book.png')\n self.top_image_label = Label(self.top, image=self.top_image, bg='white')\n self.top_image_label.place(x=10, y=10)\n\n self.heading = Label(self.top, text='my phone book app', font='magneto 20 bold', bg='white')\n self.heading.place(x=100, y=30)\n\n self.date_label = Label(self.top, text=\"Today's date : \" + date, font='arial 12 bold', fg='black', bg='white')\n self.date_label.place(x=400, y=35)\n\n # button1 view people\n self.viewButton = Button(self.bottom, text=\"View People\", font='magneto 12 bold', command=self.my_people)\n self.viewButton.place(x=250, y=70)\n\n # button2 add people\n self.addButton = Button(self.bottom, text=\" Add People\", font='magneto 12 bold',command=self.add_people)\n self.addButton.place(x=250, y=130)\n\n # button3 about us\n self.viewButton = Button(self.bottom, text=\" About Us \", font='magneto 12 bold')\n self.viewButton.place(x=250, y=190)\n\n def my_people(self):\n people = MyPeople()\n\n def add_people(self):\n add_page = AddPeople()\n\n\ndef main():\n root = Tk()\n app = Application(root)\n root.title(\"PHONE BOOK APP\")\n root.geometry(\"600x600+20+20\")\n root.resizable(True, True)\n root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"329325961","text":"import sqlite3 as sql\nimport sys\nimport repository\nfrom 
printdb import printdb\nimport os\n\n\ndef insert_update(entry, cursor):\n cursor.execute(\"\"\"\n INSERT INTO Activities (product_id, quantity,activator_id, date) VALUES (?, ?,?,?)\n \"\"\", [entry[0], entry[1], entry[2], entry[3]])\n cursor.execute(\"\"\"\n UPDATE Products SET quantity=quantity+(?) WHERE id = (?)\n \"\"\", [entry[1], entry[0]])\n\n\ndef update(entry, cursor):\n if int(entry[1]) > 0:\n insert_update(entry, cursor)\n elif int(entry[1]) < 0:\n cursor.execute(\"\"\"\n SELECT quantity FROM Products WHERE id = (?)\n \"\"\", [entry[0]])\n product_quantity = int(cursor.fetchone()[0])\n if product_quantity + int(entry[1]) >= 0:\n insert_update(entry, cursor)\n\n\ndef main(text):\n repo = repository._Repository()\n dbcon = repo._conn\n with dbcon:\n c = dbcon.cursor()\n f = open(text, 'r')\n for line in f:\n entry = line.replace('\\n', '')\n entry = entry.split(', ')\n update(entry, c)\n\n\nif __name__ == '__main__':\n main(str(sys.argv[1]))\n printdb()\n","sub_path":"action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"465766795","text":"# BSD 2-Clause License\n#\n# Copyright (c) 2022, Social Cognition in Human-Robot Interaction,\n# Istituto Italiano di Tecnologia, Genova\n#\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom pyicub.requests import iCubRequest\nfrom pyicub.utils import SingletonMeta, getPyiCubInfo\nfrom flask import Flask, jsonify, request\nimport requests\nimport json\n\nclass iCubRESTService:\n\n def __init__(self, name, robot_name, app_name, url, target, signature):\n\n self.name = name\n self.robot_name = robot_name\n self.app_name = app_name\n self.url = url\n self.target = target\n self.signature = signature\n\nclass iCubRESTServer(metaclass=SingletonMeta):\n\n def __init__(self, rule_prefix, host, port):\n self._services_ = {}\n self._app_services_ = {}\n self._apps_ = {}\n self._robots_ = {}\n self._flaskapp_ = Flask(__name__)\n self._host_ = host\n self._port_ = port\n self._rule_prefix_ = rule_prefix\n self._header_ = \"http://%s:%s\" % (self._host_, self._port_)\n self._flaskapp_.add_url_rule(\"/\", methods=['GET'], view_func=self.info)\n self._flaskapp_.add_url_rule(\"/%s\" % self._rule_prefix_, methods=['GET'], view_func=self.list_robots)\n self._flaskapp_.add_url_rule(\"/%s/register\" % self._rule_prefix_, methods=['POST'], view_func=self.remote_register)\n\n def run_forever(self):\n stop = False\n while not stop:\n try:\n self._flaskapp_.run(self._host_, self._port_)\n stop = True\n except:\n self._port_ += 1\n \n def shutdown(self):\n func = request.environ.get('werkzeug.server.shutdown')\n if func is None:\n raise RuntimeError('Not running with the Werkzeug Server')\n func()\n\n\n def wrapper_target(self, *args, **kwargs):\n rule = str(request.url_rule).strip()\n if request.method == 'GET':\n res = json.dumps(self._services_[rule], default=lambda o: o.__dict__, indent=4)\n return res\n elif request.method == 'POST':\n return self.process_target(self._services_[rule])\n\n def process_target(self, service):\n url = service.url\n data = request.get_json(force=True)\n if 'sync' in request.args:\n url+=\"?sync\"\n res = requests.post(url=url, json=data)\n return res.content\n\n def info(self):\n return jsonify(getPyiCubInfo())\n\n def list_apps(self, robot_name):\n return jsonify(list(self._apps_[robot_name].values()))\n\n def list_robots(self):\n return jsonify(list(self._robots_.values()))\n\n def list_services(self, robot_name, app_name):\n return jsonify(self._app_services_[robot_name][app_name])\n\n def remote_register(self):\n res = request.get_json(force=True)\n self.register(robot_name=res[\"robot_name\"], app_name=res[\"app_name\"], target_name=res[\"target_name\"], target=res[\"target\"], target_signature=res[\"target_signature\"], host=res[\"host\"], port=res[\"port\"])\n return res\n\n def register(self, robot_name, app_name, target_name, target, target_signature, host, port):\n robot_rule = \"/\" + self._rule_prefix_ + \"/\" + robot_name\n app_rule = robot_rule + \"/\" + app_name\n target_rule = app_rule + \"/\" + target_name\n\n if not robot_name in self._app_services_.keys():\n self._app_services_[robot_name] = {}\n self._apps_[robot_name] = {}\n robot = { 'name': robot_name,\n 'url_local': self._header_ + robot_rule,\n 'url_remote': 
None\n }\n self._robots_[robot_name] = robot\n if not (self._host_ == host and self._port_ == port):\n robot['url_remote'] = \"http://%s:%d\" % (host, port) + robot_rule\n self._flaskapp_.add_url_rule(\"/%s/\" % self._rule_prefix_, methods=['GET'], view_func=self.list_apps)\n if not app_name in self._app_services_[robot_name].keys():\n self._app_services_[robot_name][app_name] = []\n app = { 'name': app_name, \n 'url_local': self._header_ + app_rule,\n 'url_remote': None\n }\n if not (self._host_ == host and self._port_ == port):\n app['url_remote'] = \"http://%s:%d\" % (host, port) + app_rule\n self._apps_[robot_name][app_name] = app\n self._flaskapp_.add_url_rule(\"/%s//\" % self._rule_prefix_, methods=['GET'], view_func=self.list_services)\n self._flaskapp_.add_url_rule(\"/%s///requests\" % self._rule_prefix_, methods=['GET'], view_func=self.app_requests)\n self._flaskapp_.add_url_rule(\"/%s////\" % (self._rule_prefix_), methods=['GET'], view_func=self.req_info)\n service = { 'name': target_name,\n 'url_local': self._header_ + target_rule,\n 'url_remote': None\n }\n if not (self._host_ == host and self._port_ == port):\n service['url_remote'] = \"http://%s:%d\" % (host, port) + target_rule\n\n self._app_services_[robot_name][app_name].append(service)\n \n self._flaskapp_.add_url_rule(\"/%s/%s/%s/%s\" % (self._rule_prefix_, robot_name, app_name, target_name), methods=['GET', 'POST'], view_func=self.wrapper_target)\n service_url = \"http://%s:%d/%s/%s/%s/%s\" % (host, port, self._rule_prefix_, robot_name, app_name, target_name)\n self._services_[target_rule] = iCubRESTService(name=target_name,\n robot_name=robot_name,\n app_name=app_name,\n url=service_url,\n target=target,\n signature=target_signature)\n\n def app_requests(self, robot_name, app_name):\n url = str(request.url).strip()\n params = url.split(\"?\")\n if len(params) > 1:\n params = params[1]\n\n if app_name in self._apps_[robot_name].keys():\n app = self._apps_[robot_name][app_name]\n res = requests.get('%s/requests?%s' % (app['url_remote'], params))\n return jsonify(res.json())\n\n def req_info(self, req_id, robot_name=None, app_name=None, target_name=None):\n if app_name in self._apps_[robot_name].keys():\n app = self._apps_[robot_name][app_name]\n res = requests.get('%s/%s/%s' % (app['url_remote'], target_name, req_id))\n return jsonify(res.json())\n\n\nclass iCubRESTManager(iCubRESTServer):\n\n def __init__(self, icubrequestmanager, rule_prefix, host, port, proxy_host, proxy_port):\n iCubRESTServer.__init__(self, rule_prefix, host, port)\n self._proxy_host_ = proxy_host\n self._proxy_port_ = proxy_port\n self._requests_ = {}\n self.request_manager = icubrequestmanager\n try:\n requests.get('http://%s:%s/%s' % (self._proxy_host_, self._proxy_port_, self._rule_prefix_))\n except:\n self.request_manager.logger.error(\"An issue occurred while connecting to the iCubRESTServer. 
Are you sure is the server running at %s:%d ?\" % (self._proxy_host_, self._proxy_port_))\n \n\n def app_requests(self, robot_name, app_name):\n reqs = []\n if 'id' in request.args:\n req_id = request.args['id']\n return self.req_info(req_id)\n elif 'pending' in request.args:\n return self.pending_requests()\n for req_id, req in self._requests_.items():\n if robot_name == req['robot_name'] and app_name == req['app_name']:\n reqs.append(req['request'].info())\n return jsonify(reqs)\n\n def req_info(self, req_id, robot_name=None, app_name=None, target_name=None):\n if req_id in self._requests_.keys():\n return jsonify(self._requests_[req_id]['request'].info())\n return jsonify([])\n \n\n def all_requests(self):\n reqs = []\n for req in self._requests_.values():\n reqs.append(req['request'].info())\n return jsonify(reqs)\n\n def pending_requests(self):\n reqs = []\n for req in self._requests_.values():\n if req['request'].status == \"RUNNING\":\n reqs.append(req['request'].info())\n return jsonify(reqs)\n\n def process_target(self, service):\n res = request.get_json(force=True)\n kwargs = res\n if 'sync' in request.args:\n res = service.target(**kwargs)\n return jsonify(res)\n req = self.request_manager.create(timeout=iCubRequest.TIMEOUT_REQUEST, target=service.target, name=service.name, prefix=service.url)\n \n self._requests_[req.req_id] = {'robot_name': service.robot_name,\n 'app_name': service.app_name,\n 'request': req}\n self.request_manager.run_request(req, False, **kwargs)\n return jsonify(req.req_id)\n\n def register_target(self, robot_name, app_name, target_name, target, target_signature):\n self.register(robot_name, app_name, target_name, target, target_signature, self._host_, self._port_)\n data = { \"robot_name\": robot_name,\n \"app_name\": app_name,\n \"target_name\": target_name,\n \"target\": None,\n \"target_signature\": target_signature,\n \"host\": self._host_,\n \"port\": self._port_ }\n\n res = requests.post('http://%s:%s/%s/register' % (self._proxy_host_, self._proxy_port_, self._rule_prefix_), json=data)\n return res.content\n\n\n\n","sub_path":"pyicub/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":11003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"34089030","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\n\nn=int(input('Digite o valor da quantidade de valores de n:'))\n\nl=[]\n\nfor i in range(0,n,1):\n l.append(input('Digite o valores dos termos de n:'))\n media=i//n\ni=i+1\n\nprint(l[0])\nprint(l[n-1])\nprint(media)\nprint(l) \n","sub_path":"moodledata/vpl_data/43/usersdata/77/14127/submittedfiles/mediaLista.py","file_name":"mediaLista.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"340340793","text":"import pandas as pd\r\nimport re\r\nfrom sklearn import preprocessing\r\n\r\npuppy_df = pd.read_excel(\"PuppyInfo.xls\")\r\ntrainer_df = pd.read_excel(\"TrainerInfo.xlsx\")\r\noutcome_df = pd.read_excel(\"PuppyTrainerOutcome.xlsx\")\r\n\r\ndef fix_food_amounts(amounts):\r\n amounts[amounts.astype(float) > 25] = (amounts.astype(float) / 10000)\r\n return amounts\r\n\r\n\r\n# ~~~~~~~~~~~~~~~~Clean the weight~~~~~~~~~~~~~~~~\r\nprint(\"Cleaning weight\")\r\ndigit_regex = \"^(\\d+\\.?\\d*)$\"\r\nlb_regex = \"(\\d+\\.?\\d*)\\s*(?:lb|pound|ibs)\"\r\ninterval_regex = \"(\\d+\\.?\\d*)\\s*-\\s*(\\d+\\.?\\d*)\"\r\nstart_regex = \"^(\\d+\\.?\\d*)\"\r\napprox_regex = 
\"(?:rox\\.?|~|about|abt|@|#|around|apx|ely|yl|ext|was|>|\\?|te|est)\\s*(\\d+\\.?\\d*)\"\r\n\r\npuppy_df[\"Weight\"] = puppy_df[\"Weight\"].str.replace(\".*twenty four.*\", \"24\", flags=re.IGNORECASE)\r\npuppy_df[\"Weight\"] = puppy_df[\"Weight\"].str.replace(\".*sixty.*\", \"60\", flags=re.IGNORECASE)\r\npuppy_df[\"Weight\"] = puppy_df[\"Weight\"].str.replace(\".*thirty.*\", \"30\", flags=re.IGNORECASE)\r\npuppy_df[\"Weight\"] = puppy_df[\"Weight\"].str.replace(\".*mid seventies.*\", \"75\", flags=re.IGNORECASE)\r\n\r\nclean_df = puppy_df[\"Weight\"].str.extract(digit_regex, expand=False)\r\nclean_df = clean_df.fillna(puppy_df[\"Weight\"].str.extract(lb_regex, flags=re.IGNORECASE, expand=False))\r\nclean_df = clean_df.fillna(puppy_df[\"Weight\"].str.extract(interval_regex, expand=False).astype(float).mean(axis=1))\r\nclean_df = clean_df.fillna(puppy_df[\"Weight\"].str.extract(start_regex, expand=False))\r\nclean_df = clean_df.fillna(puppy_df[\"Weight\"].str.extract(approx_regex, flags=re.IGNORECASE, expand=False))\r\nclean_df = clean_df.fillna(clean_df.astype(float).mean())\r\n\r\npuppy_df[\"Weight\"] = clean_df\r\n\r\n# ~~~~~~~~~~~~~~~~Clean the Age~~~~~~~~~~~~~~~~\r\nprint(\"Cleaning Age\")\r\nwordToNumberMap = {\"one\": \"1\", \"two\": \"2\", \"three\": \"3\", \"four\": \"4\", \"five\": \"5\", \"six\": \"6\", \"seven\": \"7\",\r\n \"eight\": \"8\", \"nine\": \"9\"}\r\nyear_regex = \"(\\d+\\.?\\d*)[\\+\\s\\-]*(?:(\\d)\\/(\\d))?\\s*y\"\r\nmonth_regex = \"(\\d+\\.?\\d*)[\\+\\s\\-]*(?:(\\d)\\/(\\d))?\\s*m\"\r\nweek_regex = \"(\\d+\\.?\\d*)[\\+\\s\\-]*(?:(\\d)\\/(\\d))?\\s*w\"\r\nday_regex = \"(\\d+\\.?\\d*)[\\+\\s\\-]*(?:(\\d)\\/(\\d))?\\s*d\"\r\ndigit_regex = \"^(\\d+\\.?\\d*)$\"\r\n\r\npuppy_df[\"Age\"] = puppy_df[\"Age\"].str.replace(\"a half\", \"1/2\", flags=re.IGNORECASE)\r\npuppy_df[\"Age\"] = puppy_df[\"Age\"].str.replace(\"and\", \"\", flags=re.IGNORECASE)\r\npuppy_df[\"Age\"] = puppy_df[\"Age\"].str.replace(\"a year\", \"1 y\", flags=re.IGNORECASE)\r\npuppy_df[\"Age\"] = puppy_df[\"Age\"].str.replace(\"5\\+\", \"5\", flags=re.IGNORECASE)\r\npuppy_df[\"Age\"] = puppy_df[\"Age\"].str.replace(\"i/2\", \"1/2\", flags=re.IGNORECASE)\r\npuppy_df[\"Age\"] = puppy_df[\"Age\"].str.replace(\"15 months \\(1yr3m\\)\", \"15 m\", flags=re.IGNORECASE)\r\npuppy_df[\"Age\"] = puppy_df[\"Age\"].str.replace(\"14 weeks on 21 March, 16 wks on Apr4\", \"16 w\", flags=re.IGNORECASE)\r\npuppy_df[\"Age\"] = puppy_df[\"Age\"].str.replace(\"41 weeks, approx 9.5 months\", \"41 w\", flags=re.IGNORECASE)\r\n\r\nfor word, number in wordToNumberMap.items():\r\n puppy_df[\"Age\"] = puppy_df[\"Age\"].str.replace(word, number, flags=re.IGNORECASE)\r\n\r\nyear_df = puppy_df[\"Age\"].str.extract(year_regex, flags=re.IGNORECASE, expand=False).astype(float)\r\nyear_df = year_df[0].add(year_df[1].divide(year_df[2], fill_value=1), fill_value=0).multiply(365)\r\nmonth_df = puppy_df[\"Age\"].str.extract(month_regex, flags=re.IGNORECASE, expand=False).astype(float)\r\nmonth_df = month_df[0].add(month_df[1].divide(month_df[2], fill_value=1), fill_value=0).multiply(30)\r\nweek_df = puppy_df[\"Age\"].str.extract(week_regex, flags=re.IGNORECASE, expand=False).astype(float)\r\nweek_df = week_df[0].add(week_df[1].divide(week_df[2], fill_value=1), fill_value=0).multiply(7)\r\nday_df = puppy_df[\"Age\"].str.extract(day_regex, flags=re.IGNORECASE, expand=False).astype(float)\r\nday_df = day_df[0].add(day_df[1].divide(day_df[2], fill_value=1), fill_value=0)\r\ndigit_df = puppy_df[\"Age\"].str.extract(digit_regex, 
flags=re.IGNORECASE, expand=False).astype(float)\r\ndigit_df[digit_df >= 48] = digit_df[digit_df >= 48].multiply(7)\r\ndigit_df[(digit_df < 48) & (digit_df >= 1.5)] = digit_df[(digit_df < 48) & (digit_df >= 1.5)].multiply(30)\r\ndigit_df[digit_df < 1.5] = digit_df[digit_df < 1.5].multiply(365)\r\n\r\nclean_df = year_df.add(month_df, fill_value=0)\r\nclean_df = clean_df.add(week_df, fill_value=0)\r\nclean_df = clean_df.add(day_df, fill_value=0)\r\nclean_df = clean_df.add(digit_df, fill_value=0)\r\n\r\npuppy_df[\"Age\"] = clean_df\r\n# ~~~~~~~~~~~~~~~~RaiserState~~~~~~~~~~~~~~~~~~~~\r\nprint(\"Cleaning State\")\r\npuppy_df[\"RaiserState\"] = puppy_df[\"RaiserState\"].str.strip()\r\npuppy_df[\"RaiserState\"] = puppy_df[\"RaiserState\"].str.replace(\"[\\.`]\",\"\")\r\npuppy_df[\"RaiserState\"] = puppy_df[\"RaiserState\"].str.replace(\"Connecticut\",\"CT\")\r\npuppy_df[\"RaiserState\"] = puppy_df[\"RaiserState\"].str.replace(\"Delaware\",\"DE\")\r\npuppy_df[\"RaiserState\"] = puppy_df[\"RaiserState\"].str.replace(\"Maine\",\"ME\",flags=re.IGNORECASE)\r\npuppy_df[\"RaiserState\"] = puppy_df[\"RaiserState\"].str.replace(\"Maryland\",\"MD\")\r\npuppy_df[\"RaiserState\"] = puppy_df[\"RaiserState\"].str.replace(\"Massachusetts\",\"MA\")\r\npuppy_df[\"RaiserState\"] = puppy_df[\"RaiserState\"].str.replace(\"New\\s+York\",\"NY\")\r\npuppy_df[\"RaiserState\"] = puppy_df[\"RaiserState\"].str.replace(\"New YHork\",\"NY\")\r\npuppy_df[\"RaiserState\"] = puppy_df[\"RaiserState\"].str.replace(\"New Hampshire\",\"NH\")\r\npuppy_df[\"RaiserState\"] = puppy_df[\"RaiserState\"].str.replace(\"New Jersey\",\"NJ\")\r\npuppy_df[\"RaiserState\"] = puppy_df[\"RaiserState\"].str.replace(\"Ohio\",\"OH\",flags=re.IGNORECASE)\r\npuppy_df[\"RaiserState\"] = puppy_df[\"RaiserState\"].str.replace(\"Pennsylvania\",\"PA\",flags=re.IGNORECASE)\r\npuppy_df[\"RaiserState\"] = puppy_df[\"RaiserState\"].str.replace(\"^V$\",\"VA\",flags=re.IGNORECASE)\r\npuppy_df[\"RaiserState\"] = puppy_df[\"RaiserState\"].str.replace(\"Virginia\",\"VA\",flags=re.IGNORECASE)\r\npuppy_df[\"RaiserState\"] = puppy_df[\"RaiserState\"].str.replace(\"Vermont\",\"VT\",flags=re.IGNORECASE)\r\npuppy_df[\"RaiserState\"] = puppy_df[\"RaiserState\"].str.upper()\r\n# ~~~~~~~~~~~~~~~~Clean the FoodAmount~~~~~~~~~~~~~~~~\r\nprint(\"Cleaning FoodAmount\")\r\ndef fix_food_amount(amount):\r\n amount_in_float = float(amount)\r\n if amount_in_float > 25:\r\n return round(amount_in_float / 10000, 2)\r\n return amount_in_float\r\n\r\npuppy_df.loc[:,\"FoodAmount\"] = puppy_df[\"FoodAmount\"].apply(fix_food_amount)\r\n\r\n# ~~~~~~~~~~~~~~~~Clean the NbrOvernights3Mo~~~~~~~~~~~~~~~~\r\nprint(\"Cleaning NbrOvernights3Mo\")\r\ndef transform(nbr):\r\n none_list = [\"none\", \"na\", \"any\", \"yet\", \"zero\", \"0\", \"no\", \"hasn't\", \"haven't\"]\r\n nbr_in_string = str(nbr).strip()\r\n for none in none_list:\r\n if none in nbr_in_string.lower():\r\n return 0\r\n\r\n words = nbr_in_string.replace(\",\", \"\").split(\" \")\r\n for i in range(len(words)):\r\n # week\r\n if words[i] in [\"week\", \"weeks\"]:\r\n return int(words[i - 1]) * 7\r\n # night / day / time\r\n if words[i] in [\"night\", \"nights\", \"overnites\",\r\n \"weekends\", \"weekend\",\r\n \"overnights\", \"overnight\",\r\n \"days\", \"day\",\r\n \"time\", \"times\"]:\r\n return words[i - 1]\r\n # month\r\n if words[i] in [\"month\", \"months\"]:\r\n return int(words[i - 1]) * 30\r\n\r\n # hour\r\n if words[i] in [\"hour\", \"hours\"]:\r\n return 1\r\n # lots / multiple\r\n if words[i] in [\"lots\", \"multiple\"]:\r\n 
return 90\r\n\r\n # assuming the rest is number\r\n for word in words:\r\n if word.isdigit():\r\n return int(word)\r\n\r\n return nbr\r\n\r\npuppy_df[\"NbrOvernights3Mo\"] = puppy_df[\"NbrOvernights3Mo\"].str.replace(\".*One.*\",\"1\",flags=re.IGNORECASE)\r\npuppy_df[\"NbrOvernights3Mo\"] = puppy_df[\"NbrOvernights3Mo\"].str.replace(\".*two.*\",\"2\",flags=re.IGNORECASE)\r\npuppy_df[\"NbrOvernights3Mo\"] = puppy_df[\"NbrOvernights3Mo\"].str.replace(\".*three.*\",\"3\",flags=re.IGNORECASE)\r\npuppy_df[\"NbrOvernights3Mo\"] = puppy_df[\"NbrOvernights3Mo\"].str.replace(\".*four.*\",\"4\",flags=re.IGNORECASE)\r\npuppy_df[\"NbrOvernights3Mo\"] = puppy_df[\"NbrOvernights3Mo\"].str.replace(\".*First.*\",\"1\",flags=re.IGNORECASE)\r\npuppy_df[\"NbrOvernights3Mo\"] = puppy_df[\"NbrOvernights3Mo\"].str.replace(\".*Twice.*\",\"2\",flags=re.IGNORECASE)\r\npuppy_df.loc[:,\"NbrOvernights3Mo\"] = puppy_df[\"NbrOvernights3Mo\"].apply(transform)\r\n\r\n# ~~~~~~~~~~~~~~~~Clean the NumberClasses3Months~~~~~~~~~~~~~~~~\r\nprint(\"Cleaning NumberClasses3Months\")\r\ndef maxInString(text):\r\n if str(text) == \"\":\r\n return 4\r\n result = re.findall(\"\\d+\",str(text))\r\n if len(result) == 0:\r\n return 4\r\n r = max(map(int, result))\r\n if r > 20:\r\n return 8\r\n else:\r\n return r\r\n\r\ndef replaceAll(text):\r\n # p1 = re.compile(\".*\\(d+) classes.*\")\r\n if re.search(\"all|every|None\", str(text).strip(), re.IGNORECASE):\r\n return 8\r\n res = re.findall(\"(\\d+) classes\", str(text), re.IGNORECASE)\r\n if len(res) > 0:\r\n return max(map(int, res))\r\n try:\r\n if int(text) == 0:\r\n return 4\r\n except:\r\n pass\r\n return maxInString(text)\r\npuppy_df[\"NumberClasses3Months\"] = list(map(replaceAll,puppy_df[\"NumberClasses3Months\"]))\r\n# print puppy_df[\"NumberClasses3Months\"]\r\n\r\n# ~~~~~~~~~~~~~~~~Remove duplicate personid/puppy pairs~~~~~~~~~~~~~~~~\r\nprint(\"Removing Duplicates\")\r\n# Select the oldest age for each puppy/trainer pair\r\npuppy_df = puppy_df.groupby([\"ogr_DogID\", \"Raiser_psn_PersonID\"]).apply(lambda row: row[row['Age'] == row['Age'].max()])\r\n# Select the highest survey ID for each puppy/trainer pair\r\npuppy_df = puppy_df.groupby([\"ogr_DogID\", \"Raiser_psn_PersonID\"]).apply(\r\n lambda row: row[row['SurveyID'] == row['SurveyID'].max()])\r\n\r\n# ~~~~~~~~~~~~~~~~replace the StatusCode~~~~~~~~~~~~~~~~\r\ndef replaceStatus(i):\r\n if i in [23,25,26,27,55,98,99,121,169]:\r\n return 1\r\n else:\r\n return 0\r\n\r\noutcome_df.dog_SubStatusCode = list(map(replaceStatus, outcome_df.dog_SubStatusCode))\r\n\r\n\r\n# ~~~~~~~~~~~~~~~~set null ratings to average~~~~~~~~~~~~~~~~\r\nprint(\"Setting null ratings to average\")\r\nordinal_columns_of_interest = [\"Health\",\"StoolFirm\",\"EnergyLevel\",\"EliminationInCrate\",\"QuietInCrate\",\"RespondsToCommandKennel\",\"NoInappropriateChewing\",\"Housemanners\",\"LeftUnattended\",\"EliminationInHouse\",\"PlaybitePeople\",\"StealsFood\",\"OnFurniture\",\"BarksExcessively\",\"RaidsGarbage\",\"CounterSurfingJumpOnDoors\",\"JumpOnPeople\",\"FriendlyWAnimals\",\"GoodWKids\",\"GoodWStrangers\",\"WalksWellOnLeash\",\"KnowCommandGetBusy\",\"EliminatesOnRoute\",\"ChasingAnimals\",\"TrafficFear\",\"NoiseFear\",\"Stairs\",\"SitsOnCommand\",\"DownOnCommand\",\"StaysOnCommand\",\"ComeOnLeash\",\"ComeOffLeash\",\"CanGivePills\",\"EarCleaning\",\"NailCutting\",\"AttendsClasses\",\"BehavesWellClass\",\"AttendsHomeSwitches\"]\r\n\r\nfor column in ordinal_columns_of_interest:\r\n puppy_df[column] = 
puppy_df[column].fillna(puppy_df[column].mean())\r\n\r\n# ~~~~~~~~~~~~~~~~Merge Files~~~~~~~~~~~~~~~~\r\nprint(\"Merging Files\")\r\n\r\npuppy_df[\"GeneralComments\"] = puppy_df[\"GeneralComments\"].str.replace(\"\\n\", \" \")\r\ntrainer_df[\"DayInLife\"] = trainer_df[\"DayInLife\"].str.replace(\"\\n\", \" \")\r\n\r\n# For outer joins\r\n# out_df = pd.merge(outcome_df, puppy_df, how='left', left_on=[\"dog_DogID\", \"ogr_PersonID\"],\r\n# right_on=[\"ogr_DogID\", \"Raiser_psn_PersonID\"])\r\n# out_df = pd.merge(out_df, trainer_df, how='left', left_on=[\"ogr_PersonID\", \"dog_DogID\"],\r\n# right_on=[\"PersonID\", \"dog_DogID\"])\r\n\r\n# out_df.to_csv(\"outcome_left_dog_left_person.csv\", index=False)\r\n# out_df.to_excel(\"outcome_left_dog_left_person.xlsx\", index=False)\r\n\r\nout_df = pd.merge(outcome_df, puppy_df, how='inner', left_on=[\"dog_DogID\", \"ogr_PersonID\"],\r\n right_on=[\"ogr_DogID\", \"Raiser_psn_PersonID\"])\r\nout_df = pd.merge(out_df, trainer_df, how='left', left_on=[\"ogr_PersonID\", \"dog_DogID\"],\r\n right_on=[\"PersonID\", \"dog_DogID\"])\r\n# # ~~~~~~~~~~~~~~~~Removing Commas~~~~~~~~~~~~~~~~\r\nprint(\"Removing Commas\")\r\nfor column in out_df:\r\n if out_df[column].dtype == \"object\":\r\n out_df[column] = out_df[column].astype(str).str.replace(\",\",\"\")\r\n out_df[column] = out_df[column].str.replace(\"nan\",\"\")\r\n\r\n# ~~~~~~~~~~~~~~~~Convert Nominal Columns to integer labels~~~~~~~~~~~~~~~~\r\nle = preprocessing.LabelEncoder()\r\nprint(\"Converting nominal columns to labels\")\r\n\r\nnominal_columns_of_interest = [\"dog_Sex\",\"dbc_DogBreedDescription\",\"dbcc_ColorDescription\",\"Relationship_Description\",\"Color\",\"Sex\",\"GoodAppetite\",\"Region\",\"RaiserState\",\"Breed\"]\r\ndef transform(col,out):\r\n le.fit(col)\r\n out.write(str(dict(zip(le.classes_,le.transform(le.classes_))))+\"\\n\")\r\n return le.transform(col)\r\nwith open(\"mapping.txt\",\"w\") as f:\r\n out_df[nominal_columns_of_interest] = out_df[nominal_columns_of_interest].astype(str).apply(lambda column: transform(column,f))\r\n\r\n\r\n# ~~~~~~~~~~~~~~~~Convert Nominal Columns to integer labels~~~~~~~~~~~~~~~~\r\nprint(\"Writing to file\")\r\nout_df.to_csv(\"outcome_inner_dog_left_person.csv\", index=False)\r\nout_df.to_excel(\"outcome_inner_dog_left_person.xlsx\", index=False)\r\n\r\n","sub_path":"finalmilestone/Bonus/clean_and_merge.py","file_name":"clean_and_merge.py","file_ext":"py","file_size_in_byte":12935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"471753932","text":"#!/usr/bin/env python3\n# Copyright (c) 2019 Bitcoin Association\n# Distributed under the Open BSV software license, see the accompanying file LICENSE.\n\"\"\"\nCheck that transactions that contain op codes in unlock scripts are accepted\nbefore AND rejected after genesis activation with acceptnonstdtxn=1 parameter\nset - test net only.\n\nIn this test (opposed to bsv-genesis-pushonly.py), transactions are sent individually via P2P protocol.\n\"\"\"\nfrom test_framework.test_framework import ComparisonTestFramework\nfrom test_framework.script import CScript, OP_TRUE, OP_ADD, OP_DROP\nfrom test_framework.blocktools import create_transaction\nfrom test_framework.util import assert_equal\nfrom test_framework.comptool import TestManager, TestInstance\nfrom test_framework.mininode import msg_tx\n\nclass BSVGenesisActivationTransactions(ComparisonTestFramework):\n\n def set_test_params(self):\n self.num_nodes = 1\n self.setup_clean_chain = True\n 
self.genesisactivationheight = 103\n self.extra_args = [['-whitelist=127.0.0.1', '-acceptnonstdtxn=1', '-genesisactivationheight=%d' % self.genesisactivationheight]]\n\n def run_test(self):\n self.test.run()\n\n def assert_accepted_transaction(self, out):\n transaction_op_add = create_transaction(out.tx, out.n, CScript([1, 1, OP_ADD, OP_DROP]), 100000, CScript([OP_TRUE]))\n self.test.connections[0].send_message(msg_tx(transaction_op_add))\n self.check_mempool(self.test.connections[0].rpc, [transaction_op_add])\n\n def assert_rejected_transaction(self, out):\n def on_reject(conn, msg):\n assert_equal(msg.reason, b'mandatory-script-verify-flag-failed (Only non-push operators allowed in signatures)')\n\n transaction_op_add = create_transaction(out.tx, out.n, CScript([1, 1, OP_ADD, OP_DROP]), 100000, CScript([OP_TRUE]))\n self.test.connections[0].cb.on_reject = on_reject\n self.test.connections[0].send_message(msg_tx(transaction_op_add))\n self.test.connections[0].cb.wait_for_reject()\n\n def get_tests(self):\n\n # shorthand for functions\n block = self.chain.next_block\n node = self.nodes[0]\n self.chain.set_genesis_hash( int(node.getbestblockhash(), 16) )\n\n # Create a new block\n block(0)\n\n self.chain.save_spendable_output()\n\n yield self.accepted()\n\n # Now we need that block to mature so we can spend the coinbase.\n test = TestInstance(sync_every_block=False)\n for i in range(100):\n block(5000 + i)\n test.blocks_and_transactions.append([self.chain.tip, True])\n self.chain.save_spendable_output()\n yield test\n\n # collect spendable outputs now to avoid cluttering the code later on\n out = []\n for i in range(100):\n out.append(self.chain.get_spendable_output())\n\n # tip is on height 101\n assert_equal(node.getblock(node.getbestblockhash())['height'], 101)\n self.assert_accepted_transaction(out[0])\n\n self.nodes[0].generate(1)\n\n # tip is on height 102\n assert_equal(node.getblock(node.getbestblockhash())['height'], 102)\n self.assert_rejected_transaction(out[1])\n\n self.nodes[0].generate(1)\n\n # tip is on height 103\n assert_equal(node.getblock(node.getbestblockhash())['height'], 103)\n\n self.assert_rejected_transaction(out[2])\n\nif __name__ == '__main__':\n BSVGenesisActivationTransactions().main()","sub_path":"test/functional/bsv-genesis-pushonly-transactions-acceptnonstd.py","file_name":"bsv-genesis-pushonly-transactions-acceptnonstd.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"475156061","text":"from item import Item\r\n\r\nclass Potion(Item):\r\n \r\n def __init__(self,name):\r\n self.restore_amount = 0\r\n super().__init__(name)\r\n if(self.name == \"Potion\"):\r\n self.restore_amount = 20\r\n elif(self.name == \"Super Potion\"):\r\n self.restore_amount = 50\r\n elif(self.name == \"Hyper Potion\"):\r\n self.restore_amount = 200\r\n else:\r\n self.restore_amount = 0\r\n \r\n def invoked(self,player):\r\n \r\n prompt_message=\"\"\r\n i=1\r\n for pokemon in player.pokemons_in_hand:\r\n prompt_message+=str(i)+\":\"+pokemon.name\r\n i+=1\r\n user_selection = input(\"Please choose a pokemon to use: (1~6)\\n\"+prompt_message+\"\\n\")\r\n \r\n \r\n pokemon=player.pokemons_in_hand[int(user_selection)-1]\r\n actually_restored_amount=min(pokemon.hp-pokemon.current_hp,self.restore_amount)\r\n pokemon.current_hp+=actually_restored_amount\r\n print(pokemon.name, \"restored\", actually_restored_amount, \"HP!\\n\")\r\n \r\n 
","sub_path":"potion.py","file_name":"potion.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"569733197","text":"import os\nimport os.path as op\nfrom flask import Blueprint, render_template, request\nfrom manage import Article, Photo, Category, Talk\nfrom sqlalchemy.event import listens_for\nfrom flask_admin import form\nfrom dump_schema import ArticleSchema, CategorySchema, TalkSchema\n\n\npages = Blueprint(\n 'pages', __name__,\n template_folder='templates'\n)\n\nfile_path = op.join(op.dirname(__file__), 'static/files')\ntry:\n os.mkdir(file_path)\nexcept OSError:\n pass\n\n\n@pages.route(\"/\")\n@pages.route(\"/index\")\ndef hello():\n ctg = request.args.get('ctg', '')\n categories = Category.query.all()\n talks = Talk.query.order_by(Talk.created_at.desc()).limit(5).all()\n all_talks_count = Talk.query.count()\n photos = Photo.query.filter_by(recommended=True).limit(5).from_self().\\\n order_by(Photo.created_at.desc()).all()\n first_talk = Talk.query.order_by(Talk.id.asc()).first()\n first_talk_id = first_talk.id if first_talk else None\n\n return render_template(\n 'index.html.j2',\n body_class=\"index-page\",\n body_id=\"index-page\",\n title=\"Tracy's story\",\n categories=categories,\n talks=enumerate(talks),\n all_talks_count=all_talks_count,\n photos=photos,\n first_talk_id=first_talk_id\n )\n\n\n@pages.route(\"/article/\")\ndef article(article_id):\n article = Article.query.get(article_id)\n return render_template(\n 'articles/show.html.j2',\n body_class=\"article-show\",\n title=article.title,\n article=article\n )\n\n\n@pages.route(\"/articles\")\ndef articles():\n def return_article_dict(a):\n return {\n 'id': a.id,\n 'title': a.title,\n 'desc': a.desc,\n 'created_at': a.created_at,\n 'tags': list(map(lambda t: {'name': t.name}, a.tags))\n }\n\n if request.is_xhr:\n limit = int(request.args.get('limit', 5))\n ctg = request.args.get('ctg', '')\n\n if ctg and ctg != 'all':\n articles = Article.query.\\\n filter_by(published=True).\\\n order_by(Article.updated_at.desc()).\\\n filter(Article.categories.any(name=ctg)).limit(limit)\n else:\n articles = Article.query.\\\n filter_by(published=True).\\\n order_by(Article.updated_at.desc()).limit(limit)\n\n articles = list(map(return_article_dict, articles))\n return ArticleSchema(many=True).dumps(articles)\n else:\n return render_template(\n 'articles/index.html.j2',\n body_class=\"articles-index\",\n body_id=\"articles-index\",\n title=\"Articles\"\n )\n\n\n@pages.route(\"/categories\")\ndef categories():\n categories = Category.query.all()\n return CategorySchema(many=True).dumps(categories)\n\n\n@pages.route('/talks')\ndef talks():\n if request.is_xhr:\n last_id = int(request.args['last_id'])\n talks_schema = TalkSchema(many=True)\n next_talks = Talk.query.\\\n order_by(Talk.id.desc()).\\\n filter(Talk.id < last_id).\\\n limit(5).from_self().\\\n all()\n return talks_schema.dumps(next_talks)\n else:\n page = request.args.get('page', 1, type=int)\n pagination = Talk.query.order_by(Talk.created_at.desc()).\\\n paginate(page, per_page=20, error_out=False)\n talks = pagination.items\n return render_template(\n 'talks.html.j2',\n body_class=\"talks-page\",\n body_id=\"talks-page\",\n title=\"Tracy's talks\",\n talks=talks,\n pagination=pagination\n )\n\n\n@pages.route('/gallery')\ndef gallery():\n photos = Photo.query.from_self().order_by(Photo.created_at.desc()).all()\n photo_id = request.args.get('id', '')\n metas = {\n 'og:title': 
\"Tracy's gallery\",\n 'og:type': \"website\",\n 'og:url': request.url\n }\n if photo_id:\n photo = Photo.query.get(photo_id)\n desc = photo.desc\n og_title = \"Tracy's gallery | %(desc)s\" % locals()\n metas.update({\n 'og:title': og_title,\n 'og:description': photo.desc,\n 'og:url': request.url\n })\n else:\n metas.update({\n 'og:description': ''\n })\n return render_template(\n 'gallery.html.j2',\n body_class=\"gallery-page\",\n body_id=\"gallery-page\",\n title=\"Tracy's gallery\",\n photos=photos,\n metas=metas\n )\n\n\n@listens_for(Photo, 'after_delete')\ndef del_photo(mapper, connection, target):\n if target.path:\n # Delete image\n try:\n os.remove(op.join(file_path, target.path))\n except OSError:\n pass\n # Delete thumbnail\n try:\n os.remove(op.join(file_path, form.thumbgen_filename(target.path)))\n except OSError:\n pass\n","sub_path":"app/pages.py","file_name":"pages.py","file_ext":"py","file_size_in_byte":4862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"374700349","text":"# -*- coding: utf-8 -*-\nfrom copy import copy\nfrom dataclasses import dataclass\nfrom dataclasses import field\nfrom math import ceil\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom pkg_resources import resource_filename\n\nfrom nata.plots.axes import Axes\n\n\n@dataclass\nclass Figure:\n \"\"\"Container of parameters and child objects (including plotting\\\n backend-related objects) relevant to draw a figure.\n\n Parameters\n ----------\n figsize: ``tuple`` of ``float``, optional\n Tuple containing the width and height of the figure canvas in\n inches. If not provided, defaults to ``(6,4)``.\n\n nrows: ``int``, optional\n Number of rows available for figure axes. If not provided, defaults\n to ``1``.\n\n ncols: ``int``, optional\n Number of columns available for figure axes. If not provided,\n defaults to ``1``.\n\n style: ``{'light', 'dark'}``, optional\n Selection of standard nata style. If not provided, defaults to\n ``'light'``.\n\n fname: ``str``, optional\n Path to file with custom plotting backend parameters.\n\n rc: ``dict``, optional\n Dictionary with custom plotting backend parameters. 
Overrides\n parameters given in ``fname``.\n\n \"\"\"\n\n figsize: Optional[Tuple[float]] = None\n nrows: Optional[int] = 1\n ncols: Optional[int] = 1\n style: Optional[str] = \"light\"\n fname: Optional[str] = None\n rc: Optional[Dict[str, Any]] = None\n\n # backend objects\n fig: Any = field(init=False, repr=False, default=None)\n\n # child axes objects\n _axes: List[Axes] = field(init=False, repr=False, default_factory=list)\n\n def __post_init__(self):\n # set plotting style\n self.set_style()\n\n # open figure object\n self.open()\n\n def set_style(self):\n if not self.fname:\n self.fname = resource_filename(\n __name__, \"styles/\" + self.style + \".rc\"\n )\n\n # TODO: generalize methods for arbitrary backend\n def open(self):\n with mpl.rc_context(fname=self.fname, rc=self.rc):\n self.fig = plt.figure(figsize=self.figsize)\n\n if self.figsize is None:\n size = self.fig.get_size_inches()\n self.fig.set_size_inches(\n size[0] * self.ncols, size[1] * self.nrows\n )\n\n def close(self):\n plt.close(self.fig)\n\n def reset(self):\n self.close()\n self.open()\n\n def show(self):\n \"\"\"Shows the figure.\"\"\"\n\n with mpl.rc_context(fname=self.fname, rc=self.rc):\n dummy = plt.figure()\n new_manager = dummy.canvas.manager\n new_manager.canvas.figure = self.fig\n\n self.fig.tight_layout()\n plt.show()\n\n def _repr_html_(self):\n \"\"\"Calls :meth:`nata.plots.Figure.show`.\"\"\"\n self.show()\n\n # TODO: generalize this for arbitrary backend\n def save(\n self, path, format: Optional[str] = None, dpi: Optional[float] = 150\n ):\n \"\"\"Saves the figure to a file.\n\n Parameters\n ----------\n path: ``tuple`` of ``float``, optional\n Path in which to store the file.\n\n format: ``str``, optional\n File format, e.g. ``'png'``, ``'pdf'``, ``'svg'``. If not\n provided, the output format is inferred from the extension of\n ``path``.\n\n dpi: ``float``, optional\n Resolution in dots per inch. 
If not provided, defaults to\n ``150``.\n\n \"\"\"\n with mpl.rc_context(fname=self.fname, rc=self.rc):\n self.fig.tight_layout()\n self.fig.savefig(path, dpi=dpi, bbox_inches=\"tight\")\n\n def copy(self):\n\n self.close()\n\n new = copy(self)\n new.open()\n\n for axes in new._axes:\n axes.fig = new\n\n return new\n\n def add_axes(self, style=dict()):\n\n new_index = len(self._axes) + 1\n\n if new_index > (self.nrows * self.ncols):\n # increase number of rows\n # TODO: really?\n self.nrows += 1\n\n if self.figsize is None:\n size = self.fig.get_size_inches()\n self.fig.set_size_inches(\n size[0], size[1] * self.nrows / (self.nrows - 1)\n )\n\n for axes in self._axes:\n axes.redo_plots()\n\n axes = Axes(fig=self, index=new_index, **style)\n self._axes.append(axes)\n\n return axes\n\n def __mul__(self, other):\n \"\"\"Combines two figures into one by superimposing the plots in axes with\n matching indices.\n \"\"\"\n\n new = copy(self)\n\n for key, axes in new.axes.items():\n\n if key in other.axes:\n for plot in other.axes[key].plots:\n axes.add_plot(plot=plot)\n\n axes.redo_plots()\n\n new.close()\n\n return new\n\n def __add__(self, other):\n \"\"\"Combines two figures into one by adding new axes.\"\"\"\n\n new = self.copy()\n\n new.nrows = ceil((len(new._axes) + len(other._axes)) / new.ncols)\n\n if new.figsize is None:\n size = new.fig.get_size_inches()\n new.fig.set_size_inches(\n size[0], size[1] * new.nrows / (new.nrows - 1)\n )\n\n for axes in new._axes:\n axes.redo_plots()\n\n for axes in other._axes:\n # get a copy of old axes\n new_axes = axes.copy()\n\n # reset parent figure object\n new_axes.fig = new\n\n # redo plots in new axes\n new_axes.index = len(new._axes) + 1\n new_axes.redo_plots()\n\n # add axes to new list\n new._axes.append(new_axes)\n\n new.close()\n\n return new\n\n @property\n def axes(self) -> dict:\n \"\"\"Dictionary of child `nata.plots.Axes` objects, where the key\n to each axes is its ``index`` property\n \"\"\"\n return {axes.index: axes for axes in self._axes}\n\n @classmethod\n def style_attrs(self) -> List[str]:\n return [\n \"figsize\",\n \"nrows\",\n \"ncols\",\n \"style\",\n \"fname\",\n \"rc\",\n ]\n","sub_path":"nata/plots/figure.py","file_name":"figure.py","file_ext":"py","file_size_in_byte":6357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"453295904","text":"import matplotlib.pyplot as plt\nimport os\nimport numpy as np\nfrom argparse import ArgumentParser\nfrom functools import partial\nfrom scipy import stats\nfrom collections import namedtuple, OrderedDict\nfrom typing import Any, Dict, List, Optional\n\nfrom adaptiveleak.utils.constants import POLICIES, ENCODING, SMALL_NUMBER\nfrom adaptiveleak.utils.file_utils import read_json_gz\nfrom adaptiveleak.analysis.plot_utils import COLORS, to_label, geometric_mean, MARKER, MARKER_SIZE, LINE_WIDTH, PLOT_STYLE\nfrom adaptiveleak.analysis.plot_utils import PLOT_SIZE, AXIS_FONT, LEGEND_FONT, TITLE_FONT\nfrom adaptiveleak.analysis.plot_utils import extract_results, iterate_policy_folders, dataset_label\n\n\nNormalizedError = namedtuple('NormalizedError', ['median', 'first', 'third', 'raw'])\n\nMEDIAN_OFFSET_X = 4\nMEDIAN_OFFSET_Y = 5\nMEDIAN_OFFSET_FACTOR = 3\n\n\ndef normalize_errors_for_dataset(date: str, dataset: str, include_skip_rnn: bool) -> Dict[str, NormalizedError]:\n extract_fn = partial(extract_results, field='mae', aggregate_mode=None)\n policy_folders = list(iterate_policy_folders([date], dataset=dataset))\n\n sim_results = {name: 
res for name, res in map(extract_fn, policy_folders)}\n\n baseline_results = sim_results['uniform_standard']\n energy_budgets = list(sorted(baseline_results.keys()))\n\n normalized: Dict[str, float] = dict()\n \n for policy_name, policy_results in sim_results.items():\n if (len(policy_results) == 0) or any((b not in policy_results) for b in energy_budgets):\n continue\n\n if (not include_skip_rnn) and (policy_name.startswith('skip_rnn')):\n continue\n\n differences = [baseline_results[b] / (policy_results[b] + SMALL_NUMBER) for b in energy_budgets]\n normalized[policy_name] = NormalizedError(median=np.median(differences),\n first=np.percentile(differences, 25),\n third=np.percentile(differences, 75),\n raw=differences)\n\n return normalized\n\n\ndef plot(dataset_results: Dict[str, Dict[str, NormalizedError]], output_file: Optional[str], is_group_comp: bool):\n\n with plt.style.context(PLOT_STYLE):\n fig, ax = plt.subplots(figsize=(PLOT_SIZE[0] * 1.5, PLOT_SIZE[1]))\n\n labels: List[str] = []\n agg_errors: List[float] = []\n\n policy_names = ['adaptive_heuristic', 'adaptive_deviation'] if is_group_comp else POLICIES\n encoding_names = ['single_group', 'group_unshifted', 'pruned', 'group'] if is_group_comp else ['standard', 'padded', 'group']\n\n width = 0.1\n offset = -1 * width * 3\n\n xs = np.arange(len(dataset_results) + 1) # Include the 'All'\n\n # Print the label for the 'Overall' table\n ax.text(MEDIAN_OFFSET_X, MEDIAN_OFFSET_Y - MEDIAN_OFFSET_FACTOR * (offset - width), 'Overall Medians:', fontweight='bold', fontsize=LEGEND_FONT)\n\n for name in policy_names:\n encodings = encoding_names if name not in ('uniform', 'random') else ['standard']\n\n for encoding in encodings:\n policy_name = '{0}_{1}'.format(name, encoding)\n\n median_errors: List[float] = []\n first_errors: List[float] = []\n third_errors: List[float] = []\n raw_errors: List[float] = []\n\n for dataset, policy_results in sorted(dataset_results.items()):\n if (policy_name not in policy_results):\n continue\n\n median_errors.append(policy_results[policy_name].median)\n first_errors.append(policy_results[policy_name].median - policy_results[policy_name].first)\n third_errors.append(policy_results[policy_name].third - policy_results[policy_name].median)\n raw_errors.extend(policy_results[policy_name].raw)\n\n if len(median_errors) == (len(xs) - 1):\n aggregate = np.median(raw_errors)\n\n median_errors.append(aggregate)\n first_errors.append(aggregate - np.percentile(raw_errors, 25))\n third_errors.append(np.percentile(raw_errors, 75) - aggregate)\n\n label_name = name if encoding == 'standard' else policy_name\n \n ax.bar(xs + offset, median_errors, width=width, color=COLORS[policy_name], label=to_label(label_name))\n ax.errorbar(xs + offset, median_errors, yerr=[first_errors, third_errors], color='k', capsize=2, ls='none')\n\n # Annotate the aggregate score\n ax.text(MEDIAN_OFFSET_X, MEDIAN_OFFSET_Y - MEDIAN_OFFSET_FACTOR * offset, '{0}: {1:.2f}'.format(to_label(label_name), aggregate), fontsize=LEGEND_FONT)\n\n offset += width\n\n dataset_names = [dataset for dataset in sorted(dataset_results.keys())]\n dataset_names.append('Overall')\n\n ax.set_xticks(xs)\n ax.set_xticklabels(dataset_names, fontsize=AXIS_FONT - 3)\n ax.set_yticklabels([round(y, 3) for y in ax.get_yticks()], fontsize=AXIS_FONT)\n\n # Add a line to separate the 'All' category\n ax.axvline((xs[-1] + xs[-2]) / 2, ymax=0.4, linestyle='--', color='k')\n\n ax.set_xlabel('Dataset', fontsize=AXIS_FONT)\n ax.set_ylabel('Median Normalized Reciprocal MAE', 
fontsize=AXIS_FONT)\n ax.set_title('Median Reciprocal MAE Normalized to the Uniform Policy', fontsize=TITLE_FONT)\n\n ax.legend(fontsize=LEGEND_FONT)\n\n if output_file is None:\n plt.show()\n else:\n plt.savefig(output_file, bbox_inches='tight', transparent=True)\n \n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--date', type=str, required=True)\n parser.add_argument('--datasets', type=str, nargs='+', required=True)\n parser.add_argument('--output-file', type=str)\n parser.add_argument('--is-group-comp', action='store_true')\n parser.add_argument('--include-skip-rnn', action='store_true')\n args = parser.parse_args()\n\n print('Num Datasets: {0}'.format(len(args.datasets)))\n print('==========')\n\n dataset_errors: Dict[str, Dict[str, float]] = dict()\n\n for dataset in args.datasets:\n dataset_errors[dataset_label(dataset)] = normalize_errors_for_dataset(date=args.date, dataset=dataset, include_skip_rnn=args.include_skip_rnn)\n\n plot(dataset_errors, output_file=args.output_file, is_group_comp=args.is_group_comp)\n","sub_path":"adaptiveleak/analysis/plot_normalized_error.py","file_name":"plot_normalized_error.py","file_ext":"py","file_size_in_byte":6533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"149046377","text":"import json\nimport logging\nimport sys\nimport traceback\n\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import View\n\nfrom . import EVENTS\nfrom .models import EventManager\n\nlogger = logging.getLogger('gim.hooks.views')\n\n\nclass GithubWebHook(View):\n http_method_names = [u'post', u'head', ]\n\n @csrf_exempt\n def dispatch(self, request, *args, **kwargs):\n return super(GithubWebHook, self).dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n\n event = None\n payload = None\n repository = None\n result = None\n\n try:\n # type of event from gthub\n event = request.META['HTTP_X_GITHUB_EVENT']\n\n if event not in EVENTS:\n return HttpResponse('Event not allowed\\n')\n\n method = getattr(self, 'event_%s' % event, None)\n if method is None:\n return HttpResponse('Event not managed\\n')\n\n payload = json.loads(request.POST['payload'])\n self.event_manager = EventManager(payload['repository'])\n repository = self.event_manager.repository\n if not repository:\n return HttpResponse('Repository not managed\\n')\n\n result = method(payload)\n\n except Exception as e:\n from pprint import pformat\n log_string = 'DeliveryId: %s\\nRepository: %s\\nEvent: %s\\nPayload:\\n%s\\n%s\\n\\n' % (\n request.META.get('HTTP_X_GITHUB_DELIVERY'),\n repository,\n event,\n '-' * 8,\n pformat(payload),\n )\n logger.exception('### Github Hook problem:\\n' + log_string)\n\n if False: # For debug purpose, set it to `True`\n exc_type, exc_value, exc_tb = sys.exc_info()\n response_string = 'Exception:\\n%s\\n%s\\n%s' % (\n '-' * 10,\n '\\n'.join(traceback.format_exception(exc_type, exc_value, exc_tb)),\n log_string,\n )\n else:\n response_string = \"Something went wrong\"\n\n return HttpResponse(response_string, status=500)\n\n return HttpResponse('OK: %s done\\n' % ('Nothing' if result is None else 'Something'))\n\n def event_issues(self, payload):\n return self.event_manager.event_issues(payload['issue'],\n payload.get('action'))\n\n def event_issue_comment(self, payload):\n payload['comment']['issue'] = payload['issue']\n return self.event_manager.event_issue_comment(payload['comment'],\n 
payload.get('action'))\n\n def event_pull_request(self, payload):\n return self.event_manager.event_pull_request(payload['pull_request'],\n payload.get('action'),\n label=payload.get('label'))\n\n def event_pull_request_review_comment(self, payload):\n return self.event_manager.event_pull_request_review_comment(payload['comment'],\n payload.get('action'))\n\n def event_commit_comment(self, payload):\n return self.event_manager.event_commit_comment(payload['comment'],\n payload.get('action'))\n\n def event_push(self, payload):\n return self.event_manager.event_push(payload,\n payload.get('action'))\n\n def event_status(self, payload):\n return self.event_manager.event_status(payload,\n payload.get('action'))\n","sub_path":"gim/hooks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"14272056","text":"## CONFIG\n# change this to your external ip address for your server\n#(needs to be external to allow tor routing)\nfrom config import *\n\n\n# monkeypatching the things that asyncio needs\nimport subprocess\nsubprocess.PIPE = -1 # noqa\nsubprocess.STDOUT = -2 # noqa\nsubprocess.DEVNULL = -3 # noqa\n\n\nimport asyncio\nimport os\nos.environ['KIVY_EVENTLOOP'] = 'asyncio'\n# loop = asyncio.get_event_loop()\n# loop.set_debug(True)\n\n# imports\nfrom kivy.uix.screenmanager import Screen,ScreenManager\nfrom kivymd.app import MDApp\nfrom kivymd.uix.button import MDFillRoundFlatButton, MDIconButton\nfrom kivymd.uix.toolbar import MDToolbar\nfrom kivymd.uix.screen import MDScreen\nfrom kivymd.uix.dialog import MDDialog\nfrom kivy.lang import Builder\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivymd.theming import ThemeManager\nfrom kivy.properties import ObjectProperty,ListProperty\nimport time,os\nfrom collections import OrderedDict\nfrom functools import partial\nfrom kivy.uix.screenmanager import NoTransition\nfrom kivymd.uix.label import MDLabel\nfrom kivy.uix.widget import Widget\nfrom kivymd.uix.list import OneLineListItem\nfrom kivymd.uix.card import MDCard, MDSeparator\nfrom kivymd.uix.boxlayout import MDBoxLayout\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.metrics import dp\nfrom kivy.properties import NumericProperty\nfrom kivymd.uix.list import * #MDList, ILeftBody, IRightBody, ThreeLineAvatarListItem, TwoLineAvatarListItem, BaseListItem, ImageLeftWidget\nfrom kivy.uix.image import Image, AsyncImage\nimport requests,json\nfrom kivy.storage.jsonstore import JsonStore\nfrom kivy.core.window import Window\nfrom kivy.core.text import LabelBase\nimport shutil,sys\nfrom kivy.uix.image import Image\nimport sys\nsys.path.append(\"..\") # Adds higher directory to python modules path.\nfrom p2p import p2p,crypto,api\nfrom kivy.event import EventDispatcher\nimport threading,asyncio,sys\n\n# raise Exception(str(Window.size))\nWindow.size = WINDOW_SIZE\n# Window.fullscreen = True #'auto'\n\n# with open('log.txt','w') as of:\n# of.write('### LOG ###\\n')\ndef rgb(r,g,b,a=1):\n return (r/255,g/255,b/255,a)\n\nclass MyLayout(MDBoxLayout):\n scr_mngr = ObjectProperty(None)\n post_id = ObjectProperty()\n\n @property\n def app(self):\n if not hasattr(self,'_app'):\n from kivy.app import App\n self._app = App.get_running_app()\n return self._app\n\n def rgb(self,r,g,b,a=1):\n return rgb(r,g,b,a=a)\n\n def change_screen(self, screen, *args):\n self.scr_mngr.current = screen\n\n def change_screen_from_uri(self,uri,*args):\n self.app.uri=uri\n screen_name = route(uri)\n 
self.app.screen = screen_name\n self.app.log(f'routing to {screen_name}')\n self.scr_mngr.current = screen_name\n \n def view_post(self,post_id):\n self.post_id=post_id\n self.change_screen('view')\n\nclass ProgressPopup(MDDialog): pass\nclass MessagePopup(MDDialog): pass\nclass MyBoxLayout(MDBoxLayout): pass\nclass MyLabel(MDLabel): pass\n\nclass MyToolbar(MDToolbar):\n action_icon_color = ListProperty()\n\n def update_action_bar(self, action_bar, action_bar_items):\n action_bar.clear_widgets()\n new_width = 0\n for item in action_bar_items:\n new_width += dp(48)\n action_bar.add_widget(\n MDIconButton(\n icon=item[0],\n on_release=item[1],\n opposite_colors=True,\n text_color=(self.specific_text_color if not self.action_icon_color else self.action_icon_color),\n theme_text_color=\"Custom\",\n )\n )\n action_bar.width = new_width\n\n def update_action_bar_text_colors(self, instance, value):\n for child in self.ids[\"left_actions\"].children:\n if not self.action_icon_color:\n child.text_color = self.specific_text_color\n else:\n child.text_color = self.action_icon_color\n\n for child in self.ids[\"right_actions\"].children:\n if not self.action_icon_color:\n child.text_color = self.specific_text_color\n else:\n child.text_color = self.action_icon_color\n\n\n\n\ndef get_tor_proxy_session():\n session = requests.session()\n # Tor uses the 9050 port as the default socks port\n session.proxies = {'http': 'socks5://127.0.0.1:9150',\n 'https': 'socks5://127.0.0.1:9150'}\n return session \n\ndef get_async_tor_proxy_session():\n from requests_futures.sessions import FuturesSession\n session = FuturesSession()\n # Tor uses the 9050 port as the default socks port\n session.proxies = {'http': 'socks5://127.0.0.1:9150',\n 'https': 'socks5://127.0.0.1:9150'}\n return session \n\n\ndef get_tor_python_session():\n from torpy.http.requests import TorRequests\n with TorRequests() as tor_requests:\n with tor_requests.get_session() as s:\n return s\n\ndef draw_background(widget, img_fn='assets/bg.png'):\n from kivy.core.image import Image as CoreImage\n from kivy.graphics import Color, Rectangle \n widget.canvas.before.clear()\n with widget.canvas.before:\n Color(.4, .4, .4, 1)\n texture = CoreImage(img_fn).texture\n texture.wrap = 'repeat'\n nx = float(widget.width) / texture.width\n ny = float(widget.height) / texture.height\n Rectangle(pos=widget.pos, size=widget.size, texture=texture,\n tex_coords=(0, 0, nx, 0, nx, ny, 0, ny))\n\n\n\n\n\n\n\n\n#### LOOPER\n\n\n\ndef route(uri):\n if not '/' in uri: return None\n prefix=uri.split('/')[1] #,channel,rest = uri.split('/',3)\n\n mapd = {\n 'inbox':'feed',\n 'outbox':'feed',\n 'login':'login',\n }\n return mapd.get(prefix,None)\n\n\n\n# DEFAULT_SCREEN = route(DEFAULT_URI)\n\nclass MainApp(MDApp):\n title = 'Komrade'\n logged_in=False\n # store = JsonStore('../p2p/.keys.json')\n # store_global = JsonStore('../p2p/.keys.global.json')\n store = JsonStore('app.json')\n login_expiry = 60 * 60 * 24 * 7 # once a week\n texture = ObjectProperty()\n uri = '/inbox/world'\n\n # def connect(self):\n # # connect to kad? 
\n # self.node = p2p.connect()\n def rgb(self,*_): return rgb(*_)\n\n def change_screen(self, screen, *args):\n self.screen=screen\n self.root.change_screen(screen,*args)\n\n @property\n def channel(self):\n if not hasattr(self,'uri'): return None\n if self.uri.count('/')<2: return None\n return self.uri.split('/')[2]\n\n def change_screen_from_uri(self,uri,*args):\n self.uri=uri\n self.log('CHANGING SCREEN',uri,'??')\n return self.root.change_screen_from_uri(uri,*args)\n\n @property\n def logger(self):\n if not hasattr(self,'_logger'):\n import logging\n handler = logging.StreamHandler()\n formatter = logging.Formatter('[%(asctime)s]\\n%(message)s\\n')\n handler.setFormatter(formatter)\n self._logger = logger = logging.getLogger(__file__)\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n return self._logger\n\n def log(self,*args,**msgs):\n line = ' '.join(str(x) for x in args)\n self.logger.debug(line)\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.event_loop_worker = None\n self.loop=asyncio.get_event_loop()\n \n # load json storage\n self.username=''\n self.load_store()\n self.uri=DEFAULT_URI\n # connect to API\n self.api = api.Api(log=self.log,app=self)\n\n @property\n def channel(self):\n return self.uri.split('/')[2] if self.uri and self.uri.count('/')>=2 else None\n \n\n \n\n\n def get_session(self):\n # return get_async_tor_proxy_session()\n return get_tor_proxy_session()\n #return get_tor_python_session()\n\n def get_username(self):\n if hasattr(self,'username'): return self.username\n self.load_store()\n if hasattr(self,'username'): return self.username\n return ''\n\n def build(self):\n # bind \n global app,root\n app = self\n \n # load root\n self.root = root = Builder.load_file('root.kv')\n draw_background(self.root)\n \n # edit logo\n toolbar=root.ids.toolbar\n toolbar.md_bg_color = root.rgb(*COLOR_TOOLBAR)\n toolbar.action_icon_color=root.rgb(*COLOR_ICON)\n logo=toolbar.ids.label_title\n logo.font_name='assets/Strengthen.ttf'\n logo.font_size='58dp'\n logo.pos_hint={'center_y':0.43}\n logo.text_color=root.rgb(*COLOR_LOGO)\n \n self.root.change_screen_from_uri(self.uri if self.uri else DEFAULT_URI)\n \n return self.root\n\n\n\n\n def load_store(self):\n if not self.store.exists('user'): return\n userd=self.store.get('user')\n if not userd: return\n\n self.username = userd.get('username','')\n \n def register(self,un):\n async def do():\n dat = await self.api.register(un)\n if 'success' in dat:\n self.username=un\n self.store.put('user',username=un)\n self.root.ids.login_screen.login_status.text=dat['success']\n self.root.ids.login_screen.login_status.theme_text_color='Custom'\n self.root.ids.login_screen.login_status.text_color=rgb(*COLOR_ACCENT)\n await asyncio.sleep(1)\n #self.save_login(dat)\n # self.change_screen_from_uri('/inbox/world')\n self.change_screen('profile')\n return True\n elif 'error' in dat:\n self.root.ids.login_screen.login_status.text=dat['error']\n # await asyncio.sleep(3)\n # self.change_screen_from_uri('/inbox/world')\n return False\n asyncio.create_task(do())\n\n async def upload(self,filename,file_id=None):\n self.log('uploading filename:',filename)\n rdata=await self.api.upload(filename,file_id=file_id)\n self.log('upload result:',rdata)\n if rdata is not None:\n rdata['success']='File uploaded'\n return rdata\n return {'error':'Upload failed'}\n \n async def download(self,file_id,output_fn=None):\n self.log('downloading:',file_id)\n file_dat = await self.api.download(file_id)\n self.log('file_dat =',file_dat)\n 
if not output_fn:\n file_id=file_dat['id']\n file_ext=file_dat['ext']\n output_fn=os.path.join('cache',file_id[:3]+'/'+file_id[3:]+'.'+file_ext)\n \n output_dir=os.path.dirname(output_fn)\n if not os.path.exists(output_dir): os.makedirs(output_dir)\n\n with open(output_fn,'wb') as of:\n for data_piece in file_dat['parts_data']:\n if data_piece is not None:\n of.write(data_piece)\n \n async def post(self, content='', file_id=None, file_ext=None, anonymous=False,channel='world'):\n #timestamp=time.time()\n jsond={}\n #jsond['timestamp']=\n if content: jsond['content']=str(content)\n if file_id: jsond['file_id']=str(file_id)\n if file_ext: jsond['file_ext']=str(file_ext)\n if channel and channel[0]=='@': channel=channel[1:]\n self.log(f'''app.post(\n content={content},\n file_id={file_id},\n file_ext={file_ext},\n anonymous={anonymous},\n channel={channel},\n [username={self.username}]'''\n )\n if not anonymous and self.username:\n jsond['author']=self.username\n \n #jsond['channel']=channel\n self.log('posting:',jsond)\n res=await self.api.post(jsond,channel = channel)\n if 'success' in res:\n self.root.change_screen('feed')\n return {'post_id':res['post_id']}\n \n\n @property\n def keys(self):\n return self.api.keys\n \n async def get_post(self,post_id):\n return await self.api.get_post(post_id)\n\n async def get_posts(self,uri='/inbox/world'):\n if uri.count('/')<2: raise Exception('not a URI: '+uri)\n if 'login' in uri:\n raise Exception('!!!! '+uri)\n\n self.log(f'app.get_posts(uri={uri} -> ...')\n data = await self.api.get_posts(uri)\n self.log(f'app.get_posts() got back from api.get_posts() a {type(data)}')\n\n newdata=[]\n for d in data:\n # self.log('data d:',d)\n if not 'val' in d: continue\n newdict = dict(d['val'].items())\n newdict['timestamp']=float(d['time'])\n newdict['to_name']=d['channel']\n newdata.append(newdict)\n \n # return index\n return newdata\n\n async def get_channel_posts(self,channel,prefix='inbox'):\n # am I allowed to?\n if not channel in self.keys:\n self.log('!! tsk tsk dont be nosy')\n return\n return await self.get_posts(uri='/'+os.path.join(prefix,channel))\n\n async def get_channel_inbox(self,channel):\n return await self.get_channel_posts(channel=channel,prefix='inbox')\n \n async def get_channel_outbox(self,channel):\n return await self.get_channel_posts(channel=channel,prefix='outbox')\n\n async def get_my_posts(self,username=None,prefix='outbox'):\n if username is None and self.username: username=self.username\n if not username:\n self.log(f'!! 
whose posts?')\n return\n self.log(f'get_my_posts({self.username})')\n return await self.get_channel_outbox(username)\n\n\n\n ### SYNCHRONOUS?\n def app_func(self):\n '''This will run both methods asynchronously and then block until they\n are finished\n '''\n # self.other_task = asyncio.ensure_future(self.waste_time_freely())\n self.other_task = asyncio.ensure_future(self.api.connect_forever())\n\n async def run_wrapper():\n # we don't actually need to set asyncio as the lib because it is\n # the default, but it doesn't hurt to be explicit\n await self.async_run() #async_lib='asyncio')\n print('App done')\n self.other_task.cancel()\n\n return asyncio.gather(run_wrapper(), self.other_task)\n\n\n\n\n def open_dialog(self,msg):\n if not hasattr(self,'dialog') or not self.dialog:\n self.dialog = ProgressPopup()\n \n # raise Exception(self.dialog, msg)\n self.dialog.text=msg\n self.dialog.open()\n #stop\n\n def open_msg_dialog(self,msg):\n from screens.post.post import MessagePopup,ProgressPopup\n if not hasattr(self,'msg_dialog') or not self.msg_dialog:\n self.msg_dialog = MessagePopup()\n self.msg_dialog.ids.msg_label.text=msg\n self.msg_dialog.open()\n\n def close_dialog(self):\n if hasattr(self,'dialog'):\n self.dialog.dismiss()\n\n def close_msg_dialog(self):\n if hasattr(self,'msg_dialog'):\n self.msg_dialog.dismiss()\n\n\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n loop.run_until_complete(MainApp().app_func())\n loop.close()\n\n\n\n\n# def main():\n# # start_logger()\n# App = MainApp()\n# App.run()\n\n\n# if __name__ == '__main__':\n# # loop = asyncio.get_event_loop()\n# # asyncio.set_event_loop(loop)\n# # loop.run_until_complete(main())\n# # loop.close()\n# main()\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"531758778","text":"import os\nimport os.path\nimport sys\nimport shutil\nimport subprocess\nimport logging\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef build(source_path, build_path, install_path, targets):\n\n def _deliver(src, dst, symlink=False):\n if not symlink:\n if os.path.isdir(src):\n basename = os.path.basename(src)\n dst = os.path.join(dst, basename)\n shutil.copytree(src, dst)\n else:\n shutil.copy(src, dst)\n else:\n basename = os.path.basename(src)\n dst = os.path.join(dst, basename)\n if sys.platform.startswith('win'):\n if os.path.isdir(src):\n subprocess.call(['mklink', '/j', dst, src], shell=True)\n else:\n subprocess.call(['mklink', dst, src], shell=True)\n else:\n os.symlink(dst, src)\n\n def _install():\n # check if argument \"--symlink\" presents\n symlink = False\n if int(os.environ[\"__PARSE_ARG_SYMLINK\"]):\n symlink = True\n\n src = source_path\n\n logger.info('Src:{}'.format(src))\n logger.info('Dst:{}'.format(install_path))\n for name in os.listdir(src):\n # skipping some hidden files(e.g. 
.git)\n            if name.startswith('.') or name == 'build':\n                continue\n\n            file_path = os.path.abspath(os.path.join(src, name))\n            _deliver(file_path, install_path, symlink)\n\n        # package.py is to be copied to install path automatically by rez build system\n        # _deliver(os.path.join(source_path, 'package.py'), install_path, symlink)\n    \n        # manage necessary files that start with '.'\n        whitelist = []\n        for f in whitelist:\n            file_ = os.path.join(source_path, f)\n            if os.path.isfile(file_):\n                _deliver(file_, install_path, symlink)\n\n    if \"install\" in (targets or []):\n        _install()\n\n\n# Below section is necessary for rez custom build system - custom.py,\n# which executes build_command (\"python {root}/rezbuild.py {install}\") defined in package.py\nif __name__ == '__main__':\n    build(source_path=os.environ['REZ_BUILD_SOURCE_PATH'],\n          build_path=os.environ['REZ_BUILD_PATH'],\n          install_path=os.environ['REZ_BUILD_INSTALL_PATH'],\n          targets=sys.argv[1:])\n","sub_path":"rezbuild.py","file_name":"rezbuild.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"60491442","text":"def fmax_fmin(arr):\n\tmaxArr = minArr = arr[0]\n\tfor i in range(1,len(arr)):\n\t\tif(arr[i] > maxArr):\n\t\t\tmaxArr = arr[i]\n\t\telif(arr[i] < minArr):\n\t\t\tminArr = arr[i]\n\tprint(maxArr,minArr)\t\n\n\nif __name__ == '__main__':\n\tarr = [14,23,10,18,16,35,42,56,13,21,9]\n\tfmax_fmin(arr)","sub_path":"findMaxMin_in_Array.py","file_name":"findMaxMin_in_Array.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"50511478","text":"# -*- coding: utf-8 -*-\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium import webdriver\nimport time\nimport math\n\ndef calc(x):\n    return str(math.log(abs(12*math.sin(int(x)))))\n\nb = ''\nlink = 'http://suninjuly.github.io/explicit_wait2.html'\nb = webdriver.Firefox()\nb.implicitly_wait(1)\nb.get(link)\ntry:\n    print('Handle tab -> {0:s}'.format(str(b.current_window_handle)))\n    b.maximize_window()\n    xpath_price = '//h5[@id=\"price\"]'\n    xpath_button = '//button[@id=\"book\"]'\n    txt = WebDriverWait(b, 12).until(EC.text_to_be_present_in_element((By.XPATH, xpath_price), '$100'))\n    print(txt)\n    btn1 = b.find_element_by_xpath(xpath_button)\n    print(btn1.text)\n    ret = btn1.click()\n    print('Clicked the button')\n    numb = b.find_element_by_xpath('//span[@id=\"input_value\"]').text\n    set_text = b.find_element_by_xpath('//input[@id=\"answer\"]').send_keys(str(calc(numb)))\n    b.find_element_by_xpath('//button[@id=\"solve\"]').click()\n\n    txt = b.switch_to.alert.text\n    b.switch_to.alert.accept()\n    print(txt.split(':')[-1])\n    print('*'*50)\n    print(b.window_handles)\n    time.sleep(1)\n\nfinally:\n    b.quit()\n","sub_path":"2.4.7.py","file_name":"2.4.7.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"248532574","text":"import cv2\nimport numpy as np\nimport time\nimport math\nimport sys\n#Capturing Video through webcam.\ndef atapulii(centru_x1,centru_x2,centru_y1,centru_y2): # Pythagorean distance\n    ipot = math.sqrt(((centru_x1-centru_x2)**2)+((centru_y1-centru_y2)**2))\n    return ipot\ndef pula_muie_cur(centers,k):\n    boase = atapulii(centers[k-1][0],centers[k][0],centers[k-1][1],centers[k][1])\n    print(boase)\n    # 
print(len(centers))\n # if (len(centers)\n if int(boase) < 200:\n try:\n pula_muie_cur(centers, k+1)\n except:\n print(\"nu exista nici un\")\n else: \n print(\"a gasit pathul\")\n # sys.exit(1)\n # sum_x = centers[k][0] + centers[k+1][0]\n # sum_y = centers[k][1] + centers[k+1][1]\n # print(\"SUM_X: {}\".format(sum_x))\n # print(\"SUM_Y: {}\".format(sum_y))\n\ndef main():\n while(1):\n fps, img = cap.read()\n\n #converting frame(img) from BGR (Blue-Green-Red) to HSV (hue-saturation-value)\n\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\n #defining the range of blue color\n low_blue = np.array([94, 55, 50]) #94, 80, 2 #94, 50, 50\n high_blue = np.array([130, 255, 255]) #126, 255, 255\n\n low_green = np.array([50, 50, 50])\n high_green = np.array([102, 255, 255])\n\n\n #finding the range blue colour in the image\n blue_mask = cv2.inRange(hsv, low_blue, high_blue)\n green_mask = cv2.inRange(hsv, low_green, high_green)\n # mask = cv2.inRange(hsv, low, upper)\n\n #Morphological transformation, Dilation\n kernal = np.ones((5 ,5), \"uint8\")\n\n # blue=cv2.dilate(yellow, kernal)\n\n\n res_blue=cv2.bitwise_and(img, img, mask = blue_mask)\n res_green=cv2.bitwise_and(img, img, mask = green_mask)\n\n # Tracking Colour (Blue)\n (contours,hierarchy)=cv2.findContours(blue_mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n rectangle = []\n centers = []\n sum_x = 0\n sum_y = 0\n\n # centers = [()]\n for frame, contour in enumerate(contours):\n area = cv2.contourArea(contour)\n # approx = cv2.approxPolyDP(contour, 0.01*cv2.arcLength(contour, True), True)\n # x = approx.ravel()[0]\n # y = approx.ravel()[1]\n x_center = 0\n y_center = 0\n\n if(area > 2000): #300\n x, y, w, h = cv2.boundingRect(contour)\n border = cv2.rectangle(img, (x,y), (x+w, y+h), (255, 0, 0), 2)\n rectangle.append(border)\n for rectangles in range(0, len(rectangle)):\n cv2.putText(img, \"Rectangle\", (x, y), font, 0.75, (255,0,0), 1)\n x_center = int((2*x+w)/2)\n y_center = int((2*y+h)/2)\n # for k in range (0, len(centers) - 1):\n # sum_x += centers[rectangles][0]\n # for m in range (0, len(centers) - 1):\n # sum_y += centers[rectangles][1]\n\n # print(\"X1: {} X2: {}\".format(centers[0], centers[2]))\n #print(\"W{}: {} \\t H{}: {}\".format(rectangles, w, rectangles, h))\n #print(\"X_C{}: {} \\t Y_C{}: {} \\n\".format(rectangles, x_center, rectangles, y_center))\n # for k in range (0, len(centers) - 1):\n # diff_x -= centers[k][0]\n # diff_y -= centers[k][1]\n # if k >= stop:\n # break\n # print(\"SUM_X: {}\".format(diff_x))\n # print(\"SUM_Y: {}\".format(diff_y))\n # time.sleep(0.5)\n center_tuple = (x_center, y_center)\n centers.append(center_tuple)\n if len(centers) >= 2: \n for k in range(0, len(centers) - 1):\n \n print(\"XC_1: {} XC_2: {}\".format(centers[k-1][0], centers[k][0]))\n print(\"YC_1: {} YC_2: {}\".format(centers[k-1][1], centers[k][1]))\n \n pula_muie_cur(centers,k) \n #print(\"X_C{}: {} \\t Y_C{}: {} \\n\".format(rectangles, x_center, rectangles, y_center))\n center = cv2.rectangle(img, (x_center, y_center), (x_center+1, y_center+1), (255, 255, 0), 5)\n # print(\"SUM_X: {}\".format(sum_x))\n # print(\"SUM_Y: {}\".format(sum_y))\n # # if(len(centers) > 2):\n # print(\"1: {} 2: {}\".format(centers[0][0], centers[1][0]))\n # line = cv2.line(img, (centers[0][0], centers[0][1]), (centers[1][0], centers[1][1]), (0, 0, 255), 2)\n # for i in centers:\n # print(i[0])\n #print(\"1: {} 2: {}\".format(centers[0], centers[1]))\n # cv2.line(img, (x_center, y_center), (, ), (0, 0, 255), 2)\n\n cv2.imshow(\"Frame\", img)\n img = 
cv2.flip(img,1)\n cv2.imshow(\"Blue\",res_blue)\n # cv2.imshow(\"Green\",res_green)\n\n\n if cv2.waitKey(10) & 0xFF == 32:\n cap.release()\n cv2.destroyAllWindows()\n break\nif __name__==\"__main__\":\n cap = cv2.VideoCapture(0)\n font = cv2.FONT_HERSHEY_COMPLEX\n stop = 2\n main()\n","sub_path":"color_2.py","file_name":"color_2.py","file_ext":"py","file_size_in_byte":6691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"604321060","text":"from aiostatsd.client import StatsdClient\nfrom guillotina import app_settings\nfrom guillotina.async_util import IAsyncUtility\n\nimport asyncio\n\n\nclass IStatsdUtility(IAsyncUtility):\n pass\n\n\nclass StatsdUtility:\n\n def __init__(self, settings=None, loop=None):\n self._loop = loop\n\n async def initialize(self, app=None):\n settings = app_settings['statsd']\n app_settings['statsd_client'] = StatsdClient(\n settings['host'],\n settings['port'],\n settings.get('packet_size', 512),\n settings.get('flush_interval', 1.0)\n )\n asyncio.ensure_future(app_settings['statsd_client'].run())\n\n async def finalize(self, app):\n await app_settings['statsd_client'].stop()\n","sub_path":"guillotina_statsd/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"128395294","text":"#import sys\n#sys.path.append(\"/home/felix/Documents/Mines/Césure/_Stage Télécom/open-unmix-pytorch/\")\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport pytorch_model_summary\nfrom torchsummary import summary\nimport math\n\nimport normalization\nimport tf_transforms\nfrom utils import checkValidConvolution\n\ndef conv_block(in_chans,out_chans):\n return nn.Sequential(\n nn.Conv2d(in_chans, out_chans, kernel_size=4,stride=2,padding=1),\n nn.LeakyReLU(0.2),\n nn.BatchNorm2d(out_chans)\n )\n\ndef deconv_block(in_chans,out_chans,dropout=True,\n activation='relu',batchnorm=True):\n \n activations = nn.ModuleDict([\n ['sigmoid', nn.Sigmoid()],\n ['relu', nn.ReLU()]\n ])\n layers = [nn.ConvTranspose2d(in_chans,out_chans,kernel_size=4,stride=2,padding=1)]\n \n if dropout == True:\n layers.append(nn.Dropout2d(0.5))\n\n layers.append(activations[activation])\n \n if batchnorm == True:\n layers.append(nn.BatchNorm2d(out_chans))\n \n return nn.Sequential(*layers)\n\nclass Deep_u_net(nn.Module):\n def __init__(\n self,\n normalization_style='batch-specific',\n n_fft=4096,\n n_hop=1024,\n nb_channels=2,\n input_is_spectrogram=False,\n sample_rate=44100,\n input_mean=None,\n input_scale=None,\n max_bin=None\n ):\n \"\"\"\n Input: (nb_samples, nb_channels, nb_timesteps)\n or (nb_frames, nb_samples, nb_channels, nb_bins)\n Output: Power/Mag Spectrogram\n (nb_frames, nb_samples, nb_channels, nb_bins)\n \"\"\"\n\n super(Deep_u_net, self).__init__()\n self.stft = tf_transforms.STFT(n_fft=n_fft, n_hop=n_hop)\n self.spec = tf_transforms.Spectrogram(power=1)\n \n # register sample_rate to check at inference time\n self.register_buffer('sample_rate', torch.tensor(sample_rate))\n \n self.sp_rate = sample_rate\n\n if input_is_spectrogram:\n self.transform = tf_transforms.NoOp()\n else:\n self.transform = nn.Sequential(self.stft, self.spec)\n \n \n self.nb_output_bins = n_fft // 2 + 1\n if max_bin:\n self.nb_bins = max_bin\n else:\n self.nb_bins = self.nb_output_bins\n \n self.normalize_input = normalization.Normalize(normalization_style,\n input_mean,\n input_scale,\n self.nb_output_bins)\n 
self.encoder = nn.ModuleList()\n self.encoder.append(conv_block(nb_channels,16))\n \n for i in range(0,5):\n in_chans = 16*2**i\n self.encoder.append(conv_block(in_chans,2*in_chans))\n \n self.decoder = nn.ModuleList()\n self.decoder.append(deconv_block(16*2**5,16*2**4,dropout=True))\n for i in range(5,3,-1):\n in_chans = 16*2**i\n self.decoder.append(deconv_block(in_chans,in_chans//4,dropout=True))\n \n for i in range(3,1,-1):\n in_chans = 16*2**i\n self.decoder.append(deconv_block(in_chans,in_chans//4,dropout=False))\n \n self.decoder.append(deconv_block(16*2**1,nb_channels,dropout=False,activation='relu',batchnorm=False))\n \n def forward(self, mix):\n x = self.transform(mix) # transform to spectrogram on the fly\n nb_frames, nb_samples, nb_channels, nb_bins = x.data.shape\n \n # reshape to [nb_samples,nb_channels,nb_frames,nb_bins]\n x = torch.reshape(x,(nb_samples,nb_channels,-1,nb_bins))\n\n x_original = x.detach().clone() # save the mixture for masking later\n \n x = self.normalize_input(x) # Normalize the input\n \n x = x[...,:512] # keep 512 and not 513 frequency bins so as to have nice convs\n saved = [] # Saved output of encoder convolutional layers for stacking later\n \n for encode in self.encoder:\n saved.append(x)\n x = encode(x)\n #checkValidConvolution(x.shape[-1],kernel_size=4,stride=2,padding=1,note=\"encoder conv block\")\n #checkValidConvolution(x.shape[-2],kernel_size=4,stride=2,padding=1,note=\"encoder conv block\")\n \n for decode in self.decoder:\n x = decode(x)\n if len(saved) > 1: # Stack except for the first\n y = saved.pop(-1)\n x = torch.cat((x,y),1) # stack over channel dimension\n \n # we go back to 513 bins and mutiply the original spectrogram by our mask\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n ones = torch.ones(nb_samples,nb_channels,nb_frames,1).to(device)\n x = torch.cat((x,ones),3)\n x = x * x_original\n \n x = x.permute(2,0,1,3) # output [nb_frames, nb_samples, nb_channels, nb_bins]\n return x # return the magnitude spectrogram of the estimated source\n\nif __name__ == '__main__':\n import numpy as np\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n nb_channels = 1\n deep_u_net = Deep_u_net(\n nb_channels=nb_channels,\n sample_rate=8192,\n n_fft=1024,\n n_hop=768\n ).to(device)\n \n time = 98560\n mix = (torch.rand(16, nb_channels, time)+2)**2\n mix = mix.to(device)\n res = deep_u_net(mix) \n \n #print(pytorch_model_summary.summary(deep_u_net, mix, show_input=False))\n \n #summary(deep_u_net, input_size=(2, 262144),batch_size=16)","sub_path":"models/deep_u_net.py","file_name":"deep_u_net.py","file_ext":"py","file_size_in_byte":5631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"301735143","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom plyer import notification\nimport time\n\n# obtaining information\nres = requests.get('https://www.worldometers.info/coronavirus/').text\nsoup = BeautifulSoup(res,'html.parser')\nsoup.encode('utf-8')\ncases = soup.find(\"div\", {\"class\": \"maincounter-number\"}).get_text().strip()\n\n#Functions for notification\ndef notifyMe(title,message):\n\tnotification.notify(\n\t\ttitle = title,\n\t\tmessage = message,\n\t\ttimeout = 5 )\n\n#notification \nwhile True:\n\tnotifyMe('Infected with coronavirus',cases)\n#interval 
notification\n\ttime.sleep(300)","sub_path":"coronavirusnotification.py","file_name":"coronavirusnotification.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"63361762","text":"def det(x, hs, n):\n    sec = [(x-hs[i][0])//hs[i][1] for i in range(n)]\n    sec = sorted(sec)\n    for i, s in enumerate(sec):\n        if i > s:\n            return False\n    return True\n\n\ndef main():\n    N = int(input())\n    HS = [list(map(int, input().split())) for i in range(N)]\n\n    # Recast as a decision problem and narrow the search range\n    l = 0\n    u = 10**16\n    while u - l > 1:\n        mid = (u+l) // 2\n        if det(mid, HS, N):\n            u = mid\n        else:\n            l = mid\n\n    print(u)\n\nif __name__ == \"__main__\":\n    main()","sub_path":"abc/23/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"176343776","text":"import json\nfrom http import HTTPStatus\n\nfrom flask import Blueprint, request, Response\n\nfrom src.negocio.CodigosRespuesta import MALA_SOLICITUD, RECURSO_CREADO, OK\nfrom src.negocio.CodigoDescuento import CodigoDescuento\nfrom src.servicios.Auth import Auth\n\nrutas_codigo = Blueprint(\"rutas_codigo\", __name__)\n\n\n@rutas_codigo.route(\"/codigos\", methods=[\"POST\"])\ndef registrar_codigo():\n    codigo_recibido = request.json\n    valores_requeridos = {\"titulo\", \"descripcion\", \"codigo\", \"fechaCreacion\", \"fechaFin\", \"publicador\", \"categoria\"}\n    respuesta = Response(status=MALA_SOLICITUD)\n    if codigo_recibido is not None:\n        if all(llave in codigo_recibido for llave in valores_requeridos):\n            codigo_descuento = CodigoDescuento()\n            codigo_descuento.titulo = codigo_recibido[\"titulo\"]\n            codigo_descuento.descripcion = codigo_recibido[\"descripcion\"]\n            codigo_descuento.fechaCreacion = codigo_recibido[\"fechaCreacion\"]\n            codigo_descuento.fechaFin = codigo_recibido[\"fechaFin\"]\n            codigo_descuento.categoria = codigo_recibido[\"categoria\"]\n            codigo_descuento.publicador = codigo_recibido[\"publicador\"]\n            codigo_descuento.codigo = codigo_recibido[\"codigo\"]\n            status = codigo_descuento.registrar_codigo()\n            if status == RECURSO_CREADO:\n                respuesta = Response(\n                    json.dumps({\n                        \"idPublicacion\": codigo_descuento.idPublicacion,\n                        \"titulo\": codigo_descuento.titulo,\n                        \"descripcion\": codigo_descuento.descripcion,\n                        \"fechaCreacion\": codigo_descuento.fechaCreacion,\n                        \"fechaFin\": codigo_descuento.fechaFin,\n                        \"publicador\": codigo_descuento.publicador,\n                        \"codigo\": codigo_descuento.codigo\n                    }),\n                    status=status,\n                    mimetype=\"application/json\"\n                )\n            else:\n                respuesta = Response(status=status)\n        else:\n            respuesta = Response(status=MALA_SOLICITUD)\n\n    return respuesta\n\n\n@rutas_codigo.route(\"/codigos/<int:idPublicacion>\", methods=[\"PUT\"])\ndef actualizar_codigo(idPublicacion):\n    codigo_recibido = request.json\n    valores_requeridos = {\"titulo\", \"descripcion\", \"codigo\", \"fechaCreacion\", \"fechaFin\", \"categoria\"}\n    respuesta = Response(status=MALA_SOLICITUD)\n    if codigo_recibido is not None:\n        if all(llave in codigo_recibido for llave in valores_requeridos):\n            codigo_descuento = CodigoDescuento()\n            codigo_descuento.idPublicacion = idPublicacion\n            codigo_descuento.titulo = codigo_recibido[\"titulo\"]\n            codigo_descuento.descripcion = codigo_recibido[\"descripcion\"]\n            codigo_descuento.fechaCreacion = codigo_recibido[\"fechaCreacion\"]\n            codigo_descuento.fechaFin = codigo_recibido[\"fechaFin\"]\n            codigo_descuento.categoria = codigo_recibido[\"categoria\"]\n            
codigo_descuento.codigo = codigo_recibido[\"codigo\"]\n status = codigo_descuento.actualizar_codigo(idPublicacion)\n\n token = request.headers.get(\"token\")\n if status == HTTPStatus.OK:\n respuesta = Response(\n json.dumps(codigo_descuento.convertir_a_json()),\n status=status,\n mimetype=\"application/json\"\n )\n else:\n respuesta = Response(status=status)\n else:\n respuesta = Response(status=MALA_SOLICITUD)\n\n return respuesta\n\n\n@rutas_codigo.route(\"/codigos\", methods=[\"GET\"])\ndef obtener_codigo():\n pagina = request.args.get(\"pagina\", default=1, type=int)\n categoria = request.args.get(\"categoria\", default=-1, type=int)\n id_publicador = request.args.get(\"idPublicador\", default=0, type=int)\n if id_publicador != 0:\n codigos = CodigoDescuento.obtener_por_id_publicador(pagina, id_publicador)\n else:\n codigos = CodigoDescuento.obtener_codigo(pagina, categoria)\n if codigos:\n codigos_jsons = []\n for codigo in codigos:\n codigos_jsons.append(codigo.convertir_a_json())\n prueba = json.dumps(codigos_jsons)\n print(prueba)\n respuesta = Response(json.dumps(codigos_jsons), status=HTTPStatus.OK, mimetype=\"application/json\")\n else:\n respuesta = Response(status=HTTPStatus.NOT_FOUND)\n return respuesta\n","sub_path":"src/servicios/RutasCodigoDescuento.py","file_name":"RutasCodigoDescuento.py","file_ext":"py","file_size_in_byte":4469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"488202203","text":"#__author: slip\n#__date: 2019/3/1\nshengA = { 'A':\n {'A1': ['A11', 'A22'],\n 'A2': ['A21', 'A22'],\n 'A3': ['A31', 'A32'],\n }}\nshengB = { 'B':\n {'B1': ['B11', 'B22'],\n 'B2': ['B21', 'B22'],\n 'B3': ['B31', 'B32'],\n }}\nflag = True\nwhile True:\n p1 = input('请输入省(A/B)(输入q退出):')\n if p1 in shengA:\n for i in shengA[p1].keys():\n print(i)\n elif p1 in shengB:\n for i in shengB[p1].keys():\n print(i)\n elif p1 == 'q'or'l':\n break\n while True:\n p2 = input(\"请输入市(A1/A2/A3)(输入q退出,输入l返回上一级):\")\n if p2 in shengA['A'].keys():\n AA = sum(list(shengA['A'].values()), [])\n for i in AA:\n print(i)\n while True:\n C1 = input(\"请输入县(A11/...)(输入q退出,输入l返回上一级):\")\n if C1 == 'q':\n flag = False\n break\n elif C1 == 'l':\n break\n elif p2 in shengB['B'].keys():\n BB = sum(list(shengB['B'].values()), [])\n for i in BB:\n print(i)\n while True:\n p3 = input(\"请输入县(B11/...)(输入q退出,输入l返回上一级):\")\n#此处能加四层\n if p3 == 'q':\n flag = False\n break\n elif p3 == 'l':\n break\n if flag == False:\n break\n elif p2 == 'q' or flag == False:\n flag = False\n break\n elif p2 == 'l':\n break\n if flag == False:\n break\n# print(A)\n# list_2 = [i for j in A for i in j]\n# print(list_2)\n# #list_2 = sum(A,[])\n#print(list_2)\n# G = [['A11', 'A22'], 'A21', 'A22', ['A31', 'A32']]\n# G1 = sum(G,[])\n# print()\n","sub_path":"week1/三级菜单.py","file_name":"三级菜单.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"561208908","text":"# -*- coding:utf-8 -*-\n\n# 5、查找列表li中的元素,移除每个元素的空格, ----对字符串进行去空格\n# 并找出以’A’或者’a’开头,并以’c’结尾的所有元素,----找出字符串\n# 并添加到一个新列表中,最后循环打印这个新列表。----字符串添加到新列表\n# li = [‘taibai ’,’alexC’,’AbC ’,’egon’,’ Ritian’,’ Wusir’,’ aqc’]\n\n'''\n# 方法1:\n\nli = ['taibai ','alexC','Abc ','egon','Ritian',' Wusir',' aqc']\nnew_li = []\nfor i in li:\n if (i.strip().startswith('A') or i.strip().startswith('a')) and i.strip().endswith('c'):\n s = i.strip()\n new_li.append(s)\n\nprint(new_li)\n'''\n\n# 方法2:\n'''\nli = ['taibai ','alexC','Abc ','egon','Ritian',' 
Wusir',' aqc']\nnew_li = []\nfor i in li:\n if i.strip()[0].upper() == 'A' and i.strip()[-1] == 'c':\n new_li.append(i.strip())\nprint(new_li)\n'''\n\n# 方法3:\n'''\n\nli = ['taibai ','alexC','Abc ','egon','Ritian',' Wusir',' aqc']\nnew_li = []\nfor i in li:\n if (i.strip()[0].startswith('a') or i.strip()[0].startswith('A')) and i.strip()[-1].endswith('c'):\n new_li.append(i.strip())\nprint(new_li)\n'''\n\n\n# 6、开发敏感词语过滤程序,提示用户输入评论内容,如果用户输入的内容中包含特殊的字符:\n# 敏感词列表 li = [\"苍老师\",\"东京热\",”武藤兰”,”波多野结衣”]\n# 则将用户输入的内容中的敏感词汇替换成***,并添加到一个列表中;\n# 如果用户输入的内容没有敏感词汇,则直接添加到上述的列表中。\n\nli = ['苍老师','东京热','武藤兰','波多野结衣']\nnew_li = []\ninfo = input('请输入你的评论内容:')\nfor i in li: #先小范围,\n if i in info: #再判断小范围在不在大范围里面,因为我们输入的肯定要包含小范围的,否则不替换\n num = len(i)\n info = info.replace(i,'*'*num)\nnew_li.append(info)\n\nprint(new_li)\n\n","sub_path":"Python基础&并发编程/day5/day4作业复习.py","file_name":"day4作业复习.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"224708830","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nfrom utils import BinaryTree\n\n\nclass Solution(object):\n def printTree(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[str]]\n \"\"\"\n if root is None:\n return [\" \"]\n\n def depth(root):\n if root is None:\n return 0\n return max(depth(root.left), depth(root.right)) + 1\n\n d = depth(root)\n self.ans = [[\" \"] * (2 ** d - 1) for _ in range(d)]\n\n def helper(node, d, pos):\n self.ans[-d - 1][pos] = str(node.val)\n if node.left:\n helper(node.left, d - 1, pos - 2 ** (d - 1))\n if node.right:\n helper(node.right, d - 1, pos + 2 ** (d - 1))\n\n helper(root, d - 1, 2 ** (d - 1) - 1)\n return self.ans\n\n # if root is None:\n # return []\n # ans = [root.val]\n # if root.left:\n # return [] + self.printTree(root.left) + []\n # if root.right:\n # return [] + self.printTree(root.right) + []\n # return ans\n\n\nval = [2, 3, 4, 5, 6]\ntree = BinaryTree.Tree()\nfor i in val:\n tree.add(i)\n# print(tree.preoder(tree.root))\nfor i in Solution().printTree(tree.root):\n for j in i:\n print(j, end=\"\")\n print()\n#\n","sub_path":"vol 6/655 Todo.py","file_name":"655 Todo.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"46814267","text":"import socket\r\nimport datetime\r\n\r\ntarget = input(\"Enter a host to scan: \")\r\n\r\nf = open(target, \"w\")\r\nf.write(\">> Port scan of \" + str(target) + \" <<\\n\\n\")\r\n\r\nprint(\"-\" * 60)\r\nprint(\"Now scanning ports 1 - 1025 on the target host.\")\r\nprint(\"Please wait while the results of your scan are collected.\")\r\nprint(\"This may take several minutes.\")\r\nprint(\"-\" * 60)\r\n\r\nstart_time = datetime.datetime.now()\r\nf.write(\"Time scan started: \" + str(start_time) + \"\\n\\n\")\r\n\r\ndef pscan(port):\r\n try:\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.connect((target,port))\r\n s.close()\r\n return True\r\n except ConnectionRefusedError:\r\n f.write(\"Port \" + str(x) + \" - Host refused connection.\\n\")\r\n except TimeoutError:\r\n print(\"Host did not respond.\")\r\n f.write(\"Connection timed out. 
\\nHost is not available.\\n\")\r\n        print(\"The scan is ending early.\")\r\n        print(\"Please try again or pick a different host to scan.\")\r\n        end_time = datetime.datetime.now()\r\n        f.write(\"\\nTime scan ended: \" + str(end_time) + \"\\n\")\r\n        total_time = end_time - start_time\r\n        print(\"-\" * 60)\r\n        print(\"Time elapsed: \" + str(total_time))\r\n        f.write(\"Duration of scan: \" + str(total_time) + \"\\n\")\r\n        print(\"-\" * 60)\r\n        print(\"The full results of your scan have been collected in a\")\r\n        print(\"file named '\" + target + \"' for review.\")\r\n        f.close()\r\n        exit()\r\n    except socket.gaierror:\r\n        # gaierror: the hostname could not be resolved\r\n        f.write(\"Hostname could not be resolved.\")\r\n    except ConnectionResetError:\r\n        f.write(\"Connection reset by host.\")\r\n    except OSError:\r\n        # socket.error is an alias of OSError in Python 3; this also covers\r\n        # WinError 10065 (host unreachable)\r\n        f.write(\"Host unreachable.\")\r\n\r\nfor x in range(1, 1026):\r\n    if pscan(x):\r\n        result = \">> Port \" + str(x) + \" is OPEN.\\n\"\r\n        f.write(result)\r\n        print('Port',x,'is open')\r\n\r\nend_time = datetime.datetime.now()\r\nf.write(\"\\nTime scan ended: \" + str(end_time) + \"\\n\")\r\ntotal_time = end_time - start_time\r\nprint(\"-\" * 60)\r\nprint(\"Time elapsed: \" + str(total_time))\r\nf.write(\"Duration of scan: \" + str(total_time) +\"\\n\")\r\nf.close()\r\n\r\nprint(\"-\" * 60)\r\nprint(\"The full results of your scan have been collected in a\")\r\nprint(\"file named '\" + target + \"' for review.\")\r\n\r\n\r\n#program that should test the given host and it should generate a list of\r\n#all TCP Open ports within range of 1 to 1025. Use only standard Python’s “socket” library.\r\n#1. On execution of program system should prompt “Enter a host to scan”. User will provide a host name\r\n#2. System should look for all the ports between the range of 1 to 1025\r\n#3. If the Ports is open it should create a file and add an entry for port number\r\n#4. In case of any exception for instance “host is not available”, “host name could not be resolved” or due to any other error you need to write that exception into same file\r\n#5. You also need to record starting and ending date and time at the beginning and ending of file accordingly. 
It should also show the total time it took in port scanning process","sub_path":"PortScan.py","file_name":"PortScan.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"244270739","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, api, fields, _\nfrom odoo.exceptions import ValidationError\nimport math\n\n\nclass TaxExceptions(models.Model):\n    _name = 'account.tax.exceptions'\n\n    sequence_id = fields.Char(string='Code', required=True, ondelete='cascade',translate=True)\n    partner_id = fields.Many2one(\n        'res.partner',\n        string=\"Partner/Empresa\",\n        translate=True,\n        domain=\"['|',('customer','=',True),('supplier','=',True)]\",\n        required=True)\n    main_id_number = fields.Char(related='partner_id.main_id_number',readonly=True)\n    per_tax_code = fields.Many2one(\n        'account.tax',\n        domain=\"[('type_tax_use','=','sale'),('active','=',True),('tax_group_id.tax','in',['gross_income','profits','vat']),('tax_group_id.type','=','perception')]\",\n        translate=True,\n        string=\"Tax Code\"\n    )\n    wh_tax_code = fields.Many2one(\n        'account.tax',\n        domain=\"[('type_tax_use','=','supplier'),('active','=',True),('withholding_type','in',['tabla_ganancias','arba_ws','based_on_rule'])]\",\n        string=\"Withholding Tax Code\",\n        translate=True,\n    )\n    income_reg_code = fields.Many2one('afip.tabla_ganancias.alicuotasymontos',string=\"Income Regime Code\",)\n    sdate = fields.Date(string=\"Start Date\",required=True,translate=True,)\n    edate = fields.Date(string=\"End Date\",required=True,translate=True,)\n    active = fields.Boolean(string=\"Active\",default=True)\n    ex_type = fields.Selection([('total','Total'),('parcial','Parcial')], string=\"Type\", default='total', translate=True)\n    ex_rate = fields.Float(digits=(4, 2),string=\"Tax Exception Rate\")\n    wh_ex_rate = fields.Float(digits=(4, 2),string=\"Withholding Tax Exception Rate\")\n\n\n    @api.onchange('wh_tax_code')\n    def onchange_wh_tax_code(self):\n        if self.wh_tax_code and self.wh_tax_code.withholding_type=='tabla_ganancias':\n            self.income_reg_code = self.wh_tax_code.reg_gan_id.id\n\n    @api.model\n    def default_get(self, fields_list):\n        res = dict()\n        res['sequence_id'] = self.env['ir.sequence'].next_by_code('seq.tax.exceptions')\n        return res\n\n    @api.onchange('ex_rate')\n    def _onchange_digit_prec1(self):\n        frac, whole = math.modf(self.ex_rate)\n        self.ex_rate = math.floor(self.ex_rate % 100) + frac\n\n    @api.onchange('wh_ex_rate')\n    def _onchange_digit_prec2(self):\n        frac, whole = math.modf(self.wh_ex_rate)\n        self.wh_ex_rate = math.floor(self.wh_ex_rate % 100) + frac\n\n    @api.multi\n    def name_get(self):\n        return [(record.id, record.sequence_id) for record in self]\n\n","sub_path":"odoo-argentina-vitt/vitt_tax_exceptions/models/tax_exceptions.py","file_name":"tax_exceptions.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"299310282","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n    Puzzle match program\n    Author: M. Dean Bahari\n\n    This file contains image manipulation\n    codes for:\n    1. Grayscaling\n    2. Inverting\n    3. 
Splitting\n\"\"\"\n\nfrom PIL import Image\n\n\ndef image2gray(img):\n data = list(img.getdata())\n if img.mode == \"P\":\n pal = img.getpalette()\n\n # Return as RGB image, except image with alpha channel\n form = \"RGB\"\n\n for idx, pixel in enumerate(data):\n alpha = 255\n if img.mode == \"P\":\n pixel *= 3\n gray = (pal[pixel + 0] + pal[pixel + 1] + pal[pixel + 2]) / 3\n elif img.mode == \"L\":\n gray = pixel[idx]\n elif img.mode == \"RGB\":\n gray = (pixel[0] + pixel[1] + pixel[2]) / 3\n elif img.mode == \"RGBA\":\n gray = (pixel[0] + pixel[1] + pixel[2]) / 3\n alpha = pixel[3]\n form = \"RGBA\"\n else: # Unsupported image mode\n return False\n\n data[idx] = (gray, gray, gray, alpha)\n\n imgnew = Image.new(form, (img.width, img.height))\n imgnew.putdata(data)\n\n return imgnew\n\n\ndef image2inverted(img):\n data = list(img.getdata())\n if img.mode == \"P\":\n pal = img.getpalette()\n\n # Return as RGB image, except image with alpha channel\n form = \"RGB\"\n\n for idx, pixel in enumerate(data):\n if img.mode == \"P\":\n pixel *= 3\n pixel = (pal[pixel + 0], pal[pixel + 1], pal[pixel + 2])\n elif img.mode == \"L\":\n pixel = (pixel, pixel, pixel)\n elif img.mode == \"RGBA\":\n form = \"RGBA\"\n elif img.mode != \"RGB\": # Unsupported image mode\n return False\n\n if img.mode == \"RGBA\":\n pixel = (255 - pixel[0], 255 - pixel[1], 255 - pixel[2], pixel[3])\n else:\n pixel = (255 - pixel[0], 255 - pixel[1], 255 - pixel[2])\n\n data[idx] = pixel\n\n imgnew = Image.new(form, (img.width, img.height))\n imgnew.putdata(data)\n\n return imgnew\n\n\ndef image_split_sub(data, coord, isize, psize):\n # Calculate square coord of image piece\n left = coord[0] * psize[0]\n right = left + psize[0]\n top = coord[1] * psize[1]\n bottom = top + psize[1]\n\n # Start spliting\n newdata = []\n for i in range(top, bottom):\n for j in range(left, right):\n newdata.append(data[i * isize[0] + j])\n\n img = Image.new(\"RGBA\", psize)\n img.putdata(newdata)\n\n return img\n\n\n# ssize = split size\n# fsize = frame size\ndef image_split(img, ssize, fsize):\n # Piece size\n pwidth = img.width // ssize[0]\n pheight = img.height // ssize[1]\n\n # List containing splitted image\n split = []\n\n # Convert image to RGBA before processing\n if img.mode != \"RGBA\":\n data = list(img.getdata())\n if img.mode == \"P\":\n pal = img.getpalette()\n\n for idx, pixel in enumerate(data):\n if img.mode == \"P\":\n pixel *= 3\n pixel = (pal[pixel + 0], pal[pixel + 1], pal[pixel + 2], 255)\n elif img.mode == \"L\":\n pixel = (pixel, pixel, pixel, 255)\n elif img.mode == \"RGB\":\n pixel += (255,)\n\n data[idx] = pixel\n else:\n data = img.getdata()\n\n # Create frame for image\n fdata = list(data)\n for x in range(fsize[0], img.width - fsize[0]):\n for y in range(fsize[1], img.height - fsize[1]):\n idx = y * img.width + x\n fdata[idx] = (0, 0, 0, 0)\n\n frame = Image.new(\"RGBA\", (img.width, img.height))\n frame.putdata(fdata)\n split.append((\"frame\", frame))\n\n # Create splitted img\n for x in range(0, ssize[0]):\n for y in range(0, ssize[1]):\n split.append((str(y + 1) + \"-\" + str(x + 1),\n image_split_sub(data, (x, y),\n (img.width, img.height),\n (pwidth, pheight))))\n \n return split\n","sub_path":"pmutils.py","file_name":"pmutils.py","file_ext":"py","file_size_in_byte":3955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"9209878","text":"from vpython import *\n\n# create and tune canvas\nscene = canvas(title='Examples of Tetrahedrons',\n width=1450, 
height=650,\n               center=vector(0,0,0), background=color.cyan)\n\n# activate canvas with box\nbox()\n\n# create and tune canvas\nscene2 = canvas(title='Examples of Tetrahedrons',\n                width=1450, height=650,\n                center=vector(0,0,0), background=color.cyan)\n\nbox()\n\n# choose the current canvas for all subsequent objects\nscene.select()\n\nbox(pos=vec(0, 2, 0))","sub_path":"some.py","file_name":"some.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"322023696","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/3/13 \n# @Author : Edrain\n\n\ndef is_num(x):\n    \"\"\"Check whether the string is purely numeric\"\"\"\n    try:\n        int_x = int(x)\n        return int_x\n    except ValueError:\n        print(\"contains non-numeric characters\")\n\n\nif __name__ == '__main__':\n    a = \"123\"\n    print(is_num(a))\n","sub_path":"code_ed/Day01-15/code/Day01/nums01.py","file_name":"nums01.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"554264314","text":"\"\"\"\nfetch web resource\n\"\"\"\nimport requests\n\n\nclass Fetcher(object):\n    \"\"\"\n    fetcher interface\n    :returns status_code, content\n    \"\"\"\n    def fetch(self, url, method, headers, data):\n        raise NotImplementedError\n\n\nclass RequestsFetcher(object):\n    def __init__(self, timeout=10):\n        self.timeout = timeout\n\n    \"\"\"\n    fetcher using requests lib\n    \"\"\"\n    def fetch(self, url, method, headers, data):\n        if method == \"GET\":\n            r = requests.get(url, headers=headers, timeout=self.timeout)\n        elif method == \"POST\":\n            r = requests.post(url, headers=headers, data=data, timeout=self.timeout)\n        else:\n            raise ValueError\n        return r.status_code, r.content\n","sub_path":"wallstreet/crawel/fetcher.py","file_name":"fetcher.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"411599238","text":"\"\"\"\n    Python Div Game Studios-like API over pyGame.\n\n    Example:\n\n    #!/usr/bin/env python\n\n    from pydiv import *\n\n    if __name__ == \"__main__\":\n        set_mode(320,240)\n        \n        program.start()\n\"\"\"\n# This file is part of pydiv.\n#\n# Foobar is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Foobar is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Foobar. 
If not, see .\n\nall = [\n 'blendop',\n 'cd',\n 'crypt',\n 'core',\n 'debug',\n 'dir',\n 'draw',\n 'effects',\n 'ffi',\n 'file',\n 'flic',\n 'grproc',\n 'gui',\n 'joy',\n 'key',\n 'm7',\n 'map',\n 'math',\n 'mem',\n 'mouse',\n 'path',\n 'proc',\n 'rand',\n 'regex',\n 'say',\n 'screen',\n 'scroll',\n 'sort',\n 'sound',\n 'string',\n 'sys',\n 'text',\n 'time',\n 'timers',\n 'video',\n 'wm'\n]\n\nfrom core import program\nfrom grproc import GProcess\nfrom say import *\nfrom text import *\nfrom video import *\nfrom wm import *\n\n\n","sub_path":"pydiv/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"121672702","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"SCICO package configuration.\"\"\"\n\nimport os\nimport os.path\nfrom ast import parse\nfrom builtins import filter, next\n\nfrom setuptools import find_packages, setup\n\nname = \"scico\"\n\n# Get version number from scico/__init__.py\n# See http://stackoverflow.com/questions/2058802\nwith open(os.path.join(name, \"__init__.py\")) as f:\n version = parse(next(filter(lambda line: line.startswith(\"__version__\"), f))).body[0].value.s\n\npackages = find_packages()\n\n\nlongdesc = \"\"\"SCICO is a Python package for solving imaging inverse problems, with an emphasis on problems arising in scientific imaging applications. One of the primary focuses of the package is providing methods for solving ill-posed inverse problems with the use of an appropriate prior model of the reconstruction space.\n\"\"\"\n\ninstall_requires = [\"numpy\", \"scipy\", \"imageio\", \"jax\"]\ntests_require = [\"pytest\", \"pytest-runner\"]\npython_requires = \">3.8\"\n\n\nsetup(\n name=name,\n version=version,\n description=\"Scientific Computational Imaging COde: A Python \"\n \"package for scientific imaging problems\",\n long_description=longdesc,\n keywords=[\"Computational Imaging\", \"Inverse Problems\", \"Optimization\", \"ADMM\", \"PGM\"],\n platforms=\"Any\",\n license=\"BSD\",\n url=\"https://github.com/bwohlberg/scico\",\n author=\"SCICO Developers\",\n author_email=\"brendt@ieee.org\", # Temporary\n packages=packages,\n package_data={\"scico\": [\"data/*/*.png\", \"data/*/*.npz\"]},\n include_package_data=True,\n python_requires=python_requires,\n tests_require=tests_require,\n install_requires=install_requires,\n extras_require={\n \"tests\": tests_require,\n \"docs\": [\"sphinx >=3.5.2\", \"numpydoc\", \"sphinxcontrib-bibtex\"],\n },\n classifiers=[\n \"License :: OSI Approved :: BSD License\",\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n zip_safe=False,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"428950378","text":"import dash\nimport dash_core_components as dcc\nimport dash_bootstrap_components as dbc\nimport dash_html_components as html\nimport plotly.graph_objects as go\nfrom dash.dependencies import Input, Output\nimport pandas as pd\nimport numpy as np\nimport time\nimport threading\nfrom bs4 import 
BeautifulSoup\nimport requests\n\nbaseURLJH = \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/\"\n\n#external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\ncolors = {\n 'background': '#111111',\n 'text': 'rgb(119,189,217)'\n }\n\ntickFont = {'size': 15, \n 'color': colors['text'], \n 'family': 'sans-serif'\n }\n\n\ncols_int =['CumConfirmed', 'CumDeaths', 'CumRecovered']\n\ndef loadDataJH(fileName, columnName):\n\n data = pd.read_csv(baseURLJH + fileName) \\\n .melt(id_vars=['Province/State', 'Country/Region', 'Lat', 'Long'], var_name='date', value_name=columnName)\n \n data['Province/State'] = data['Province/State'].fillna('')\n data.fillna(0, inplace=True)\n\n data[columnName] = data[columnName].astype(np.int64)\n \n data['date'] = data['date'].astype('datetime64[ns]')\n return data\n\n\ndef loadData_ulklc():\n data = pd.read_csv('https://raw.githubusercontent.com/ulklc/covid19-timeseries/master/countryReport/raw/rawReport.csv')\n\n data['day'] = data['day'].astype('datetime64[ns]')\n\n data.rename(columns={'countryName': 'Country/Region', \n 'lat': 'Lat', 'lon': 'Long', \n 'confirmed': 'CumConfirmed', \n 'death':'CumDeaths', \n 'recovered':'CumRecovered', \n 'day': 'date'}, \n inplace=True)\n\n data['Province/State'] = ''\n\n data.drop(['region', 'countryCode'], axis=1, inplace=True)\n data = data[['Province/State', 'Country/Region', 'Lat', 'Long', 'date', 'CumConfirmed', 'CumDeaths', 'CumRecovered']]\n\n return data\n\n\n\nallData = loadData_ulklc()\ncountries = allData['Country/Region'].unique()\ncountries.sort()\n\nconfirmed_eg, recovers_eg, deaths_eg = 0, 0, 0\n\nRELOAD_INTERVAL = 1 * 3600 # reload interval in seconds\n\n\n\ndef refresh_data_every():\n while True:\n refresh_data()\n time.sleep(RELOAD_INTERVAL)\n\ndef refresh_data():\n global allData, countries, confirmed_eg, recovers_eg, deaths_eg\n ### some expensive computation function to update dataframe\n # allData = loadDataJH(\"time_series_covid19_confirmed_global.csv\", \"CumConfirmed\") \\\n # .merge(loadDataJH(\"time_series_covid19_deaths_global.csv\", \"CumDeaths\")) \\\n # .merge(loadDataJH(\"time_series_19-covid-Recovered.csv\", \"CumRecovered\"))\n allData = loadData_ulklc()\n countries = allData['Country/Region'].unique()\n countries.sort()\n\n print('DATA UPDATED!!')\n\nthread = threading.Thread(target=refresh_data_every, daemon=True)\nthread.start()\n\napp = dash.Dash(__name__)\napp.title = 'EG - Coronavirus COVID-19 Tracker'\n\napp.layout = html.Div(className='body',\n style={\n 'family':\"sans-serif\" ,\n 'backgroundColor': colors['background'],\n 'position':'absolute',\n 'width':'100%',\n 'height':'100%',\n 'top':'0px',\n 'left':'0px',\n 'z-index':'1000'\n },\n children=[\n html.H1('Egypt Coronavirus (COVID-19) Tracker', \n style={\n 'textAlign': 'center',\n 'margin-top': '3rem',\n 'color': colors['text']\n }),\n html.Div(className=\"row\", \n style={'margin-left': '2rem'},\n children=[\n html.Div(className=\"three columns\", children=[\n html.H5('Country', \n style={\n 'textAlign': 'left',\n 'color': colors['text']\n }\n ),\n dcc.Dropdown(\n id='country',\n options=[{'label':c, 'value':c} for c in countries],\n value='Egypt',\n disabled=False\n )\n ]),\n html.Div(className=\"three columns\", children=[\n html.H5('Governorate/State', \n style={\n 'textAlign': 'left',\n 'color': colors['text']\n }),\n dcc.Dropdown(\n id='state'\n )\n ]),\n html.Div(className=\"three columns\", children=[\n html.H5('Selected Metrics', \n style={\n 
'textAlign': 'left',\n 'color': colors['text']\n }),\n dcc.Checklist(\n id='metrics',\n options=[{'label':m, 'value':m} for m in ['Confirmed', 'Deaths', 'Recovered', 'Active']],\n value=['Confirmed', 'Deaths', 'Active'], \n style={\n 'textAlign': 'left',\n 'color': colors['text']\n }\n )\n ])\n ]),\n html.Div(className=\"row\", children=[\n html.Div(className=\"three columns\", children=[\n html.Div(\n [html.H2(id=\"confirmed_text\"), html.H4(\"Confirmed\", \n style={\n 'textAlign': 'left',\n 'color': colors['background']\n }\n )\n ],\n id=\"confirmed\",\n className=\"mini_container\",\n )\n ]\n ),\n html.Div(className=\"three columns\", children=[\n html.Div(\n [html.H2(id='deaths_text'), \n html.H4(\"Deaths\", \n style={\n 'textAlign': 'left',\n 'color': colors['background']\n }\n )\n ],\n id=\"deaths\",\n className=\"mini_container\",\n )\n ]\n ),\n html.Div(className=\"three columns\", children=[\n html.Div(\n [html.H2(id=\"recovered_text\"), \n html.H4(\"Recovered\", \n style={\n 'textAlign': 'left',\n 'color': colors['background']\n }\n )\n ],\n id=\"recovered\",\n className=\"mini_container\",\n )\n ]\n ),\n html.Div(className=\"three columns\", children=[\n html.Div(\n [html.H2(id=\"active_text\"), \n html.H4(\"Active\", \n style={\n 'textAlign': 'left',\n 'color': colors['background']\n }\n )\n ],\n id=\"active\",\n className=\"mini_container\",\n ),\n ],\n )\n ]),\n\n html.Div(className=\"row\", children=[\n\n html.Div(className=\"three columns\", children=[\n html.Div(\n [html.H2(id='expected_cases_by_tomorrow_text', children='NA'), \n html.H5(\"Expected Cases by Tomorrow\", \n style={\n 'textAlign': 'left',\n 'color': colors['background']\n }\n )\n ],\n id=\"expected_cases_by_tomorrow\",\n className=\"mini_container\",\n )\n ]\n ),\n\n html.Div(className=\"three columns\", children=[\n html.Div(\n [html.H2(id=\"cases_increase_text\"), \n html.H5(\"Cases Increase From Yesterday\", \n style={\n 'textAlign': 'left',\n 'color': colors['background']\n }\n )\n ],\n id=\"cases_increase\",\n className=\"mini_container\",\n )\n ]\n ),\n html.Div(className=\"three columns\", children=[\n html.Div(\n [html.H2(id=\"mortality_rate_infection_text\"), \n html.H5(\"Mortality Rate / Infection Case\", \n style={\n 'textAlign': 'left',\n 'color': colors['background']\n }\n )\n ],\n id=\"mortality_rate_infections\",\n className=\"mini_container\",\n )\n ]\n ),\n html.Div(className=\"three columns\", children=[\n html.Div(\n [html.H2(id='mortality_rate_closed_text'), \n html.H5(\"Mortality Rate / Closed Case\", \n style={\n 'textAlign': 'left',\n 'color': colors['background']\n }\n )\n ],\n id=\"mortality_rate_closed\",\n className=\"mini_container\",\n )\n ]\n )\n ],\n )\n ,\n \n dcc.Loading(dcc.Graph(\n id=\"plot_cum_metrics\",\n config={ 'displayModeBar': False }\n )),\n dcc.Loading(dcc.Graph(\n id=\"plot_new_metrics\",\n config={ 'displayModeBar': False }\n ))\n \n # html.Div([\n # html.Div([\n # #html.H3('New Metrics'),\n # dcc.Graph(\n # id=\"plot_cum_metrics\",\n # config={ 'displayModeBar': False }\n # )\n # ], className=\"six columns\"),\n\n # html.Div([\n # #html.H3('Cum Metrics'),\n # dcc.Graph(\n # id=\"plot_new_metrics\",\n # config={ 'displayModeBar': False }\n # )\n # ], className=\"six columns\"),\n # ], className=\"row\")\n ,\n html.Div(className=\"row\", children=[\n dcc.Markdown(className='three columns', \n children=['''\n > Data by Johns Hopkins University Center for Systems Science and Engineering (JHU CSSE)\n\n > [Github](https://github.com/CSSEGISandData/COVID-19)\n '''],\n 
style={ 'width': '100%',\n 'textAlign': 'left',\n 'background-color': colors['background'],\n 'color': colors['text'],\n 'font-size': 13\n }\n )\n ]\n ),\n html.Div(id='intermediate', style={'display': 'none'}),\n dcc.Interval(id='interval-component', interval=1*1000) # in milliseconds\n ]\n )\n\ndef fix_data_errors(data):\n # fix erros where cum value of today less than yesterday\n df_fix_err = data.select_dtypes(include='int64').diff() < 0\n for col in df_fix_err.columns:\n error_ixs = df_fix_err.index[df_fix_err[col] == True]\n data.loc[error_ixs, col] = np.nan\n data[col] = data[col].fillna(method='ffill').astype(np.int64)\n\n return data\n\n@app.callback(\n Output('intermediate', 'children'),\n [Input('country', 'value'), Input('state', 'value')])\ndef nonreactive_data(country, state):\n data = allData.loc[allData['Country/Region'] == country].copy()\n\n data = data.iloc[-14:, :]\n \n data = fix_data_errors(data)\n\n data['CumActive'] = data['CumConfirmed'] - data['CumDeaths'] - data['CumRecovered']\n\n if state == '':\n data = data.drop('Province/State', axis=1).groupby(\"date\").sum().reset_index()\n else:\n data = data.loc[data['Province/State'] == state]\n newCases = data.select_dtypes(include='int64').diff().fillna(0)\n\n newCases.columns = [column.replace('Cum', 'New') for column in newCases.columns]\n data = data.join(newCases)\n #data['dateStr'] = data['date'].dt.strftime('%b %d, %Y')\n data['dateStr'] = data.date.dt.strftime('%d %b %y')\n data['DiffYesterday'] = ((data.NewConfirmed.shift(periods=-1) / data.CumConfirmed)*100).round(1)\n data['MortalityRateInfection'] = ((data.CumDeaths / data.CumConfirmed)*100).round(1)\n data['MortalityRateClosed'] = ((data.CumDeaths / (data.CumDeaths + data.CumRecovered))*100).round(1)\n data = data.loc[~(data[['CumConfirmed', 'CumDeaths', 'CumRecovered', 'NewConfirmed', 'NewDeaths']]==0).all(axis=1)]\n return data.to_json()\n\n@app.callback(\n [Output('state', 'options'), Output('state', 'value')],\n [Input('country', 'value')]\n)\ndef update_states(country):\n states = list(allData.loc[allData['Country/Region'] == country]['Province/State'].unique())\n states.insert(0, '')\n states.sort()\n state_options = [{'label':s, 'value':s} for s in states]\n state_value = state_options[0]['value']\n return state_options, state_value\n\ndef barchart(data, metrics, prefix=\"\", yaxisTitle=\"\"):\n figure = go.Figure(\n data=[\n go.Bar( \n name=metric, x=data.date, y=data[prefix + metric],\n marker_line_color='rgb(0,0,0)', marker_line_width=1,\n marker_color={ 'Deaths':'rgb(200,30,30)', \n 'Recovered':'rgb(30,200,30)', \n 'Confirmed': colors['text'], \n 'Active': 'rgb(245,140,10)'}[metric]\n ) for metric in metrics\n ],\n layout= {\n 'plot_bgcolor': colors['background'],\n 'paper_bgcolor': colors['background'],\n 'font': {\n 'color': colors['text']\n },\n 'xaxis':dict(showticklabels=True, fixedrange=True),\n 'yaxis':dict(showticklabels=True, fixedrange=True)\n }\n )\n figure.update_layout( \n barmode='group', legend=dict(x=.05, y=0.95, font={'size':15}, bgcolor='rgba(240,240,240,0.2)'), \n plot_bgcolor=colors['background'], font=tickFont) \\\n .update_xaxes( \n title=\"\", tickangle=-45, type='category', showgrid=False, gridcolor='#DDDDDD', \n tickfont=tickFont, ticktext=data.dateStr, tickvals=data.date) \\\n .update_yaxes(\n title=yaxisTitle, showgrid=True, gridcolor='#DDDDDD')\n return figure\n\ndef scatterchart(data, metrics, prefix=\"\", yaxisTitle=\"\"):\n figure = go.Figure(data=[\n go.Scatter( \n name=metric, x=data.date, y=data[prefix + 
metric],\n                mode='lines+markers',\n                marker_line_color='rgb(0,0,0)', marker_size=12,\n                marker_color={ 'Deaths':'rgb(200,30,30)', \n                'Recovered':'rgb(30,200,30)', \n                'Confirmed': colors['text'], \n                'Active': 'rgb(245,140,10)'}[metric]\n            ) for metric in metrics\n        ],\n        layout= {\n            'plot_bgcolor': colors['background'],\n            'paper_bgcolor': colors['background'],\n            'font': {\n                'color': colors['text']\n            },\n            'xaxis':dict(showticklabels=True, fixedrange=True),\n            'yaxis':dict(showticklabels=True, fixedrange=True)\n        }\n    )\n    figure.update_layout( \n        legend=dict(x=.05, y=0.95, font={'size':15}, bgcolor='rgba(240,240,240,0.2)'), \n        plot_bgcolor=colors['background'], font=tickFont) \\\n        .update_xaxes( \n            title=\"\", tickangle=-45, type='category', showgrid=False, gridcolor='#DDDDDD', \n            tickfont=tickFont, ticktext=data.dateStr, tickvals=data.date) \\\n        .update_yaxes(\n            title=yaxisTitle, showgrid=True, gridcolor='#DDDDDD')\n    return figure\n\n\n@app.callback(\n    Output('plot_new_metrics', 'figure'), \n    [Input('intermediate', 'children'), Input('metrics', 'value')]\n)\ndef update_plot_new_metrics(cleaned_data, metrics):\n    data = pd.read_json(cleaned_data)\n    metrics_ = [metric for metric in metrics if metric != 'Active']\n    return barchart(data, metrics_, prefix=\"New\", yaxisTitle=\"New Cases per Day\")\n\n@app.callback(\n    Output('plot_cum_metrics', 'figure'), \n    [Input('intermediate', 'children'), Input('metrics', 'value')]\n)\ndef update_plot_cum_metrics(cleaned_data, metrics):\n    data = pd.read_json(cleaned_data)\n    return scatterchart(data, metrics, prefix=\"Cum\", yaxisTitle=\"Cumulative Cases\")\n\n@app.callback(\n    [\n        Output('confirmed_text', 'children'),\n        Output('deaths_text', 'children'),\n        Output('recovered_text', 'children'),\n        Output('active_text', 'children'),\n        Output('mortality_rate_infection_text', 'children'),\n        Output('mortality_rate_closed_text', 'children'),\n        Output('cases_increase_text', 'children'),\n    ],\n    [Input('intermediate', 'children'), Input('country', 'value')]\n)\ndef update_text(cleaned_data, country):\n    data = pd.read_json(cleaned_data)\n    try:\n        new_cases = data['NewConfirmed'].iat[-1]\n        if new_cases > 0:\n            new_cases = str('+') + str(new_cases)\n        else:\n            new_cases = str(new_cases)\n        \n        stats = data[['CumConfirmed', 'CumDeaths', 'CumRecovered', 'CumActive', 'MortalityRateInfection', 'MortalityRateClosed']].iloc[-1].tolist() + \\\n            [new_cases + ' (' + str(data['DiffYesterday'].iat[-2]) + '%)']\n\n        # if country == 'Egypt':\n        #     stats[0] = max(stats[0], confirmed_eg) \n        #     stats[1] = max(stats[1], deaths_eg) \n        #     stats[2] = max(stats[2], recovers_eg) \n\n        for stat in range(-3, -1):\n            stats[stat] = str(stats[stat]) + '%'\n        \n    except Exception:\n        stats = [0, 0, 0, 0, 0, 0, 0]\n    return stats\n\nif __name__ == '__main__':\n    app.run_server(port=8080, debug=True, threaded=True, processes=1)\n    ","sub_path":"corona-tracker/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":22902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"283072024","text":"import pickle\nimport os\nimport random\nimport sys\n\nfrom config import Config\nfrom os.path import expanduser\nfrom OpenSSL import SSL\nfrom txjsonrpc.netstring import jsonrpc\n\nfrom twisted.application import service, internet\nfrom twisted.python.log import ILogObserver\nfrom twisted.internet import ssl, task\nfrom twisted.web import resource, server\nfrom twisted.web.resource import NoResource\n\nfrom subspace.network import Server\nfrom subspace import log\nfrom subspace.message import 
*\n\n\n\nsys.path.append(os.path.dirname(__file__))\n\ndatafolder = expanduser(\"~\") + \"/.subspace/\"\n\nf = file(datafolder + 'subspace.conf')\ncfg = Config(f)\n\nusername = cfg.rpcusername if \"rpcusername\" in cfg else \"Username\"\npassword = cfg.rpcpassword if \"rpcpassword\" in cfg else \"Password\"\nbootstrap_node = cfg.bootstrapnode if \"bootstrapnode\" in cfg else \"1.2.3.4\"\nbootstrap_port = cfg.bootstrapport if \"bootstrapport\" in cfg else \"8335\"\n\nif os.path.isfile(datafolder + 'keys.pickle'):\n privkey = pickle.load(open(datafolder + \"keys.pickle\", \"rb\"))\nelse:\n privkey = random_key()\n pickle.dump(privkey, open(datafolder + \"keys.pickle\", \"wb\"))\n\npubkey = encode_pubkey(privkey_to_pubkey(privkey), \"hex_compressed\")\n\napplication = service.Application(\"subspace\")\napplication.setComponent(ILogObserver, log.FileLogObserver(sys.stdout, log.INFO).emit)\n\nif os.path.isfile('cache.pickle'):\n kserver = Server.loadState('cache.pickle')\nelse:\n kserver = Server()\n kserver.bootstrap([(bootstrap_node, bootstrap_port)])\nkserver.saveStateRegularly('cache.pickle', 10)\nudpserver = internet.UDPServer(cfg.port if \"port\" in cfg else 8335, kserver.protocol)\nudpserver.setServiceParent(application)\n\nclass ChainedOpenSSLContextFactory(ssl.DefaultOpenSSLContextFactory):\n def __init__(self, privateKeyFileName, certificateChainFileName,\n sslmethod=SSL.SSLv23_METHOD):\n \"\"\"\n @param privateKeyFileName: Name of a file containing a private key\n @param certificateChainFileName: Name of a file containing a certificate chain\n @param sslmethod: The SSL method to use\n \"\"\"\n self.privateKeyFileName = privateKeyFileName\n self.certificateChainFileName = certificateChainFileName\n self.sslmethod = sslmethod\n self.cacheContext()\n\n def cacheContext(self):\n ctx = SSL.Context(self.sslmethod)\n ctx.use_certificate_chain_file(self.certificateChainFileName)\n ctx.use_privatekey_file(self.privateKeyFileName)\n self._context = ctx\n\n# Web-Server\nclass WebResource(resource.Resource):\n def __init__(self, kserver):\n resource.Resource.__init__(self)\n self.kserver = kserver\n # throttle in seconds to check app for new data\n self.throttle = .25\n # define a list to store client requests\n self.delayed_requests = []\n # define a list to store incoming keys from new POSTs\n self.incoming_posts = []\n # setup a loop to process delayed requests\n loopingCall = task.LoopingCall(self.processDelayedRequests)\n loopingCall.start(self.throttle, False)\n\n def getChild(self, child, request):\n return self\n\n def render_GET(self, request):\n def respond(value):\n value = value or NoResource().render(request)\n request.write(value)\n request.finish()\n log.msg(\"Getting key: %s\" % request.path.split('/')[-1])\n d = self.kserver.get(request.path.split('/')[-1])\n if d is not None:\n respond(d)\n return server.NOT_DONE_YET\n else:\n self.delayed_requests.append(request)\n return server.NOT_DONE_YET\n\n def render_POST(self, request):\n key = request.path.split('/')[-1]\n value = request.content.getvalue()\n log.msg(\"Setting %s = %s\" % (key, value))\n self.kserver.set(key, value)\n self.incoming_posts.append(key)\n return value\n\n def processDelayedRequests(self):\n \"\"\"\n Processes the delayed requests that did not have\n any data to return last time around.\n \"\"\"\n\nif \"server\" in cfg:\n server_protocol = server.Site(WebResource(kserver))\n if \"useSSL\" in cfg:\n webserver = internet.SSLServer(cfg.serverport if \"serverport\" in cfg else 8080,\n server_protocol,\n 
ChainedOpenSSLContextFactory(cfg.sslkey, cfg.sslcert))\n        #webserver = internet.SSLServer(8335, website, ssl.DefaultOpenSSLContextFactory(options[\"sslkey\"], options[\"sslcert\"]))\n    else:\n        webserver = internet.TCPServer(cfg.serverport if \"serverport\" in cfg else 8080, server_protocol)\n    webserver.setServiceParent(application)\n\n# RPC-Server\nclass RPCCalls(jsonrpc.JSONRPC):\n    \"\"\"An example object to be published.\"\"\"\n\n    def jsonrpc_getpubkey(self):\n        return pubkey\n\n    def jsonrpc_getprivkey(self):\n        return privkey\n\n    def jsonrpc_getmessages(self):\n        return MessageDecoder(privkey, kserver).getMessages()\n\n    def jsonrpc_send(self, pubkey, message):\n        r = kserver.getRange()\n        if r is False:\n            return \"Couldn't find any peers. Maybe check your internet connection?\"\n        else:\n            blocks = MessageEncoder(pubkey, privkey, message, r).getblocks()\n            items = blocks.items()\n            random.shuffle(items)\n            for key, value in items:\n                log.msg(\"Setting %s = %s\" % (key, value))\n                kserver.set(key, value)\n            return \"Message sent successfully\"\n\nfactory = jsonrpc.RPCFactory(RPCCalls)\n\nfactory.addIntrospection()\n\njsonrpcServer = internet.TCPServer(7080, factory, interface='127.0.0.1')\njsonrpcServer.setServiceParent(application)\n\n","sub_path":"subspaced.py","file_name":"subspaced.py","file_ext":"py","file_size_in_byte":5684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"522243805","text":"import sys\ndef openplus(s):\n    if s == \"-\":\n        return sys.stdin\n    else:\n        return open(s)\n\ncolnr=int(sys.argv[1])\n# list() keeps each file's rows indexable on Python 3, where map() returns an iterator\nlines = [list(map(str.split, openplus(f).readlines())) for f in sys.argv[2:]]\nprint (\"\\n\".join([\" \".join([line[i][colnr] for line in lines]) for i in range(len(lines[0]))]))\n","sub_path":"architecture/choose-column-from-multiple-files.py","file_name":"choose-column-from-multiple-files.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"649039539","text":"# TextifyVision.py - Team BDF - CS 298-01 S19 WCSU\n\n# This python script provides functions for using Textify's implementation of\n# Microsoft Azure's Computer Vision API\n\n# The current implementation is based off of this quick-start guide:\n# https://docs.microsoft.com/en-us/azure/cognitive-services/Computer-vision/QuickStarts/python-analyze\n\nimport requests\nimport json\nimport authentication\n\nANALYZE_URL = authentication.cvBaseURL + \"/vision/v2.0/analyze\"\n\n# 0: a basketball about to enter a basketball hoop\n# 1: a plate of pasta topped with cheese and basil with bread in the background\nurlsToAnalyze = [\"https://i.imgur.com/045nXnj.jpg\", \"https://i.imgur.com/U598smz.jpg\"]\n\nfor imageURL in urlsToAnalyze:\n    callHeader = {\"Ocp-Apim-Subscription-Key\": authentication.cvKey1}\n    callParams = {'visualFeatures': \"Categories,Description,Color\"}\n    callData = {\"url\": imageURL}\n\n    response = requests.post(ANALYZE_URL, headers=callHeader, params=callParams, json=callData)\n    response.raise_for_status()\n\n    result = response.json()\n    #print(json.dumps(result))\n\n    imageCaption = result[\"description\"][\"captions\"][0][\"text\"]\n    captionConfidence = result[\"description\"][\"captions\"][0][\"confidence\"]\n    print(\"I am \" + str(captionConfidence * 100) + \"% sure that this is:\")\n    print(imageCaption + 
\"\\n\")\n","sub_path":"src/TextifyVision.py","file_name":"TextifyVision.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"129604718","text":" \n\nimport re\n\n # 验证QQ号, match\ndef main1():\n username = input('input username:')\n qq = input('input qq number:')\n\n m1 = re.match(r'^[0-9a-zA-Z_]{6,20}$', username) # 不带转义\n if not m1:\n print('plz input valid username.')\n \n m2 = re.match(r'^[1-9]\\d{4,11}$', qq)\n if not m2:\n print('plz input valid qq number.')\n\n if m1 and m2:\n print('valid username and qq')\n\n# 手机号 compile, findall, finditer, group, search\ndef main2():\n pattern = re.compile(r'(?<=\\D)1[34578]\\d{9}(?=\\D)')\n sentence = '''\n 重要的事说8130123456789遍,我的手机号是135123456789这个靓号,\n 不是15600998765,也是110或119,王大锤的手机才是15600998765。\n '''\n\n mylist = re.findall(pattern, sentence)\n print(mylist)\n for temp in pattern.finditer(sentence):\n print(temp.group())\n \n m = pattern.search(sentence)\n while m:\n print(m.group())\n m = pattern.search(sentence, m.end())\n\n# 和谐\ndef main3():\n sentence = '你丫是傻叉吗?我操你大爷的。Fuck you.'\n purified = re.sub('[操艹]|fuck|shit|傻[比逼叉缺吊]|煞笔', '*', sentence, flags=re.IGNORECASE)\n print(purified)\n\n# 分句\ndef main():\n poem = '床前明月光,疑是地上霜。举头望明月,低头思故乡。'\n sentence_list = re.split(r'[,。,.]', poem)\n while '' in sentence_list:\n sentence_list.remove('')\n print(sentence_list)\n\nif __name__ == '__main__':\n main()","sub_path":"012.py","file_name":"012.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"56609786","text":"from datetime import datetime\r\n\r\n\r\ndef validate_request(req):\r\n if ('action' and 'time') in req:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef form_response(req, code, data=None):\r\n return {'action': req.get('action'),\r\n 'time': datetime.now().timestamp(),\r\n 'code': code,\r\n 'data': data}\r\n","sub_path":"messenger/server/protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"355808969","text":"#!/usr/bin/env python3\n\nimport glob\nimport os\nimport time\n\nfrom attr import attrs, attrib\nimport toml\n\ndef wait_for_monitor(p):\n print(f\"Waiting for {p}...\")\n g = []\n while not g:\n g = glob.glob(p)\n return g[0]\n\n@attrs\nclass Hwmon():\n path = attrib(converter = wait_for_monitor)\n probes = attrib(default = [])\n\n@attrs\nclass Fangroup():\n probe = attrib(default = None)\n temp_min = attrib(default = 20)\n temp_max = attrib(default = 90)\n speed = attrib(default = [])\n speed_min = attrib(default = -1)\n pwm = attrib(default = [])\n pwm_stop = attrib(default = 0)\n pwm_start = attrib(default = 127)\n pwm_min = attrib(default = 63)\n pwm_max = attrib(default = 255)\n pwm_fixed = attrib(default = -1)\n\ndef p(path):\n path = path.split(\"/\")\n return str(os.path.join(hwmons[path[0]].path, path[1]))\n\ndef echo(value, path):\n with open(p(path), \"w\") as f:\n f.write(str(value))\n\ndef cat(path):\n with open(p(path)) as f:\n return int(f.read().strip())\n\n# Load config\nwith open(\"/etc/metalfan.toml\") as f:\n config = toml.loads(f.read())\ninterval = config.get(\"interval\", 1)\nhwmons = {name: Hwmon(**params) for name, params in config[\"hwmon\"].items()}\nfangroups = [Fangroup(**fg) for fg in config[\"fangroup\"]]\n\n# Set all fans to \"manual\" mode\nfor fg in fangroups:\n for pwm in 
fg.pwm:\n        echo(1, f\"{pwm}_enable\")\n        if fg.pwm_fixed >= 0:\n            echo(fg.pwm_fixed, pwm)\n\nprint(\"Starting polling loop...\")\n\nwhile True:\n    # Gather temps\n    for name, hm in hwmons.items():\n        hm.temps = dict()\n        for pb in hm.probes:\n            hm.temps[pb] = cat(f\"{name}/{pb}_input\") / 1000\n\n    for fg in fangroups:\n        if fg.pwm_fixed < 0:\n            pb = fg.probe.split(\"/\")\n            temp = hwmons[pb[0]].temps[pb[1]]\n\n            if temp < fg.temp_min:\n                pwm = fg.pwm_stop\n            elif temp >= fg.temp_max:\n                pwm = fg.pwm_max\n            else:\n                heat = ((temp - fg.temp_min) / (fg.temp_max - fg.temp_min)) ** 2\n                pwm = int((fg.pwm_max - fg.pwm_min) * heat + fg.pwm_min)\n\n            for ctl, speed in zip(fg.pwm, fg.speed):\n                if pwm > fg.pwm_stop and cat(f\"{speed}_input\") < fg.speed_min:\n                    echo(fg.pwm_start, ctl)\n                else:\n                    echo(pwm, ctl)\n\n    time.sleep(interval)\n","sub_path":"metalfan.py","file_name":"metalfan.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"131127475","text":"# coding=utf-8\n'''\nauthor:马维畅\ntime:2019/1/21 10:45\n'''\n\nimport os, sys, re\n\n\ndef lastline():\n    global pos\n\n    while True:\n        pos = pos - 1\n        try:\n            f.seek(pos, 2)  # seek relative to the end of the file\n            if f.read(1) == b'\\n':  # file is opened in binary mode, so compare bytes\n                break\n        except:  # reached the first line of the file; read it directly and stop\n            f.seek(0, 0)\n            print(f.readline().strip().decode())\n\n            return\n\n    print(f.readline().strip().decode())\n\n\nif __name__ == \"__main__\":\n\n    f = open('elasticsearch-2019-02-21.log', 'rb')  # binary mode; with 'r' you would get doubled '\\n\\n' newlines\n    pos = 0\n    for line in range(10):  # loop once per line wanted from the end of the file (here: the last 10)\n        lastline()\n    f.close()\n","sub_path":"IO-file/read_log.py","file_name":"read_log.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"254285568","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 2 18:20:41 2020\n\n@author: usingh\n\"\"\"\n\nfrom pyrpipe import pyrpipe_utils as pu\nfrom pyrpipe import pyrpipe_engine as pe\nimport os\n\nclass Quant:\n    \"\"\"This is an abstract class for quantification programs.\n    \"\"\"\n    def __init__(self,index=\"\"):\n        self.category=\"Quantification\"\n        self.passedArgumentDict={}\n        self.index=index\n        \n    def build_index(self):\n        \"\"\"function to create an index used by the quantification program\n        \"\"\"\n        pass\n    \n    def check_index(self):\n        \"\"\"Function to check if index of this object is valid and exists\n        \"\"\"\n        \n    def perform_quant(self,sra_object):\n        \"\"\"Function to perform quantification taking an sra_object as input\n        \n        \"\"\"\n        pass\n    \nclass Kallisto(Quant):\n    \"\"\"This class represents kallisto\n    \n    kallisto_index: string\n        path to kallisto index\n    threads: int\n        num threads to use\n    \"\"\"\n    \n    def __init__(self,kallisto_index,threads=None):\n        super().__init__() \n        self.programName=\"kallisto\"\n        self.dep_list=[self.programName] \n        if not pe.check_dependencies(self.dep_list):\n            raise Exception(\"ERROR: \"+ self.programName+\" not found.\")\n        \n        \"\"\"\n        ##kallisto index\n        self.validArgsIndex=['-i','--index','-k','--kmer-size','--make-unique']\n        ##kallisto quant\n        self.validArgsQuant=['-i','--index','-o','--output-dir','--bias','-b','--bootstrap-samples',\n                        '--seed','--plaintext','--fusion','--single','--fr-stranded','--rf-stranded',\n                        '-l','--fragment-length','-s','--sd','-t','--threads','--pseudobam']\n        ##kallisto pseudo\n        self.validArgsPseudo=['-i','--index','-o','--output-dir','-u','--umi','-b','--batch',\n                         '--single','-l','--fragment-length','-s','--sd','-t','--threads','--pseudobam']\n        ##kallisto h5dump\n        
self.validArgsh5dump=['-o','--output-dir']\n \n self.valid_args=pu.get_union(self.validArgsIndex,self.validArgsQuant,self.validArgsPseudo,self.validArgsh5dump)\n \"\"\"\n \n if not threads:\n threads=os.cpu_count()\n self.threads=threads\n \n #if index is passed, update the passed arguments\n if len(kallisto_index)>0 and pu.check_files_exist(kallisto_index):\n print(\"kallisto index is: \"+kallisto_index)\n self.kallisto_index=kallisto_index\n else:\n print(\"No kallisto index provided. Please use build_index() now to generate an index...\")\n \n def build_index(self,index_path,index_name,fasta,verbose=False,quiet=False,logs=True,objectid=\"NA\",**kwargs):\n \"\"\"Function to build kallisto index\n \n index_path: str\n path to the output directory\n index_name: str\n index name\n verbose: bool\n Print stdout and std error\n quiet: bool\n Print nothing\n logs: bool\n Log this command to pyrpipe logs\n objectid: str\n Provide an id to attach with this command e.g. the SRR accession. This is useful for debugging, benchmarking and reports.\n kwargs: dict\n Options to pass to kallisto. This will override the existing options in self.passed_args_dict (only replace existing arguments and not replace all the arguments).\n \n :return: Status of kallisto index\n :rtype: bool\n \"\"\"\n \n #check input\n if not pu.check_files_exist(fasta):\n pu.print_boldred(\"{} does not exist. Exiting\".format(fasta))\n return False\n \n #create out dir\n if not pu.check_paths_exist(index_path):\n if not pu.mkdir(index_path):\n print(\"ERROR in building kallisto index. Failed to create index directory.\")\n return False\n \n indexOut=os.path.join(index_path,index_name)\n \n #if not threads:\n # threads=self.threads\n #no threads in build index\n \n newOpts={\"--\":(fasta,),\"-i\":indexOut}\n mergedOpts={**newOpts,**kwargs}\n \n #call kallisto\n status=self.run_kallisto(\"index\",verbose=verbose,quiet=quiet,logs=logs,objectid=objectid,**mergedOpts)\n \n if status:\n #check if index file is present \n if pu.check_files_exist(indexOut):\n self.kallisto_index=indexOut\n pu.print_green(\"kallisto_index is:\"+self.kallisto_index)\n return True\n else:\n pu.print_boldred(\"Failed to create kallisto index\")\n return False\n \n def perform_quant(self,sra_object,out_dir=\"\",threads=None,verbose=False,quiet=False,logs=True,objectid=\"NA\",**kwargs):\n \"\"\"Run kallisto quant\n \n sra_object: SRA\n SRA object contatining paths to fastq files\n index_path: str\n path to the output directory\n index_name: str\n index name\n threads: int\n Number of threads\n verbose: bool\n Print stdout and std error\n quiet: bool\n Print nothing\n logs: bool\n Log this command to pyrpipe logs\n objectid: str\n Provide an id to attach with this command e.g. the SRR accession. This is useful for debugging, benchmarking and reports.\n kwargs: dict\n Options to pass to kallisto. 
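These are forwarded to the kallisto executable as given.\n        \n            A minimal, hedged usage sketch (the index path, output directory and\n            the sra_object are illustrative assumptions, not values defined in\n            this module):\n        \n                kl = Kallisto(kallisto_index=\"kindex/transcripts.idx\", threads=4)\n                out = kl.perform_quant(sra_object, out_dir=\"kallisto_out\")\n                # on success, out is the directory that contains abundance.tsv\n        \n            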
This will override the existing options\n\n :return: Path to kallisto out directory\n :rtype: string\n \"\"\"\n \n if not out_dir:\n out_dir=os.path.join(sra_object.location,\"kallisto_out\")\n \n if not threads:\n threads=self.threads\n \n \n if sra_object.layout == 'PAIRED':\n newOpts={\"--threads\":str(threads),\"-o\":out_dir,\"--\":(sra_object.localfastq1Path,sra_object.localfastq2Path),\"-i\":self.kallisto_index}\n else:\n newOpts={\"--threads\":str(threads),\"-o\":out_dir,\"--single\":\"\", \"--\":(sra_object.localfastqPath,),\"-i\":self.kallisto_index}\n \n \n #add input files to kwargs, overwrite newOpts if kwargs is present\n mergedOpts={**newOpts,**kwargs}\n \n #call kallisto\n status=self.run_kallisto(\"quant\",verbose=verbose,quiet=quiet,logs=logs,objectid=sra_object.srr_accession,**mergedOpts)\n \n if status:\n #check if sam file is present in the location directory of sra_object\n if pu.check_files_exist(os.path.join(out_dir,\"abundance.tsv\")):\n return out_dir\n \n pu.print_boldred(\"kallisto quant failed\")\n return \"\"\n \n \n \n def run_kallisto(self,subcommand,valid_args=None,verbose=False,quiet=False,logs=True,objectid=\"NA\",**kwargs):\n \"\"\"Wrapper for running kallisto.\n \n Parameters\n ----------\n \n subcommand: str\n subcommand for kallisto\n valid_args: list\n List of valid arguments, arguments in kwargs not in this list will be ignored\n verbose: bool\n Print stdout and std error\n quiet: bool\n Print nothing\n logs: bool\n Log this command to pyrpipe logs\n objectid: str\n Provide an id to attach with this command e.g. the SRR accession. This is useful for debugging, benchmarking and reports.\n kwargs: dict\n Options to pass to kallisto. This will override the existing options\n\n :return: Returns the status of kallisto. True is passed, False if failed.\n :rtype: bool\n \"\"\"\n \n #check for a valid index\n if subcommand!=\"index\":\n if not self.check_index():\n raise Exception(\"ERROR: Invalid kallisto index. 
Please run build index to generate an index.\")\n \n \n kallisto_Cmd=['kallisto',subcommand]\n kallisto_Cmd.extend(pu.parse_unix_args(valid_args,kwargs))\n \n #start ececution\n status=pe.execute_command(kallisto_Cmd,verbose=verbose,quiet=quiet,logs=logs,objectid=objectid,command_name=\" \".join(kallisto_Cmd[0:2]))\n if not status:\n pu.print_boldred(\"kallisto failed\")\n return status \n \n def check_index(self):\n \"\"\"Check valid kallisto index\n \"\"\"\n if hasattr(self,'kallisto_index'):\n return(pu.check_files_exist(self.kallisto_index))\n return False\n \n\n\nclass Salmon(Quant):\n \"\"\"This class represents salmon\n \n salmon_index: string\n Path to salmon index\n threads: int\n Number of threads\n \"\"\" \n def __init__(self,salmon_index,threads=None): \n super().__init__() \n self.programName=\"salmon\"\n self.dep_list=[self.programName] \n if not pe.check_dependencies(self.dep_list):\n raise Exception(\"ERROR: \"+ self.programName+\" not found.\")\n \n \"\"\"\n ##salmon index\n self.validArgsIndex=['-v','--version','-h','--help','-t','--transcripts','-k','--kmerLen','-i',\n '--index','--gencode','--keepDuplicates','-p','--threads','--perfectHash',\n '--type','-s','--sasamp']\n ##salmon quant read\n self.validArgsQuantReads=['--help-reads','-i','--index','-l','--libType','-r','--unmatedReads',\n '-1','--mates1','-2','--mates2','-o','--output','--discardOrphansQuasi',\n '--allowOrphansFMD','--seqBias','--gcBias','-p','--threads','--incompatPrior',\n '-g','--geneMap','-z','--writeMappings','--meta','--alternativeInitMode',\n '--auxDir','-c','--consistentHits','--dumpEq','-d','--dumpEqWeights',\n '--fasterMapping','--minAssignedFrags','--reduceGCMemory','--biasSpeedSamp',\n '--strictIntersect','--fldMax','--fldMean','--fldSD','-f','--forgettingFactor',\n '-m','--maxOcc','--initUniform','-w','--maxReadOcc','--noLengthCorrection',\n '--noEffectiveLengthCorrection','--noFragLengthDist','--noBiasLengthThreshold',\n '--numBiasSamples','--numAuxModelSamples','--numPreAuxModelSamples','--useVBOpt',\n '--rangeFactorizationBins','--numGibbsSamples','--numBootstraps','--thinningFactor',\n '-q','--perTranscriptPrior','--vbPrior','--writeOrphanLinks','--writeUnmappedNames',\n '-x','--quasiCoverage']\n ##salmon quant alignment\n self.validArgsQuantAlign=['--help-alignment','-l','--libType','-a','--alignments','-t','--targets','-p',\n '--threads','--seqBias','--gcBias','--incompatPrior','--useErrorModel',\n '-o','--output','--meta','-g','--geneMap','--alternativeInitMode','--auxDir'\n ,'--noBiasLengthThreshold','--dumpEq','-d','--dumpEqWeights','--fldMax',\n '--fldMean','--fldSD','-f','--forgettingFactor','--minAssignedFrags',\n '--gencode','--reduceGCMemory','--biasSpeedSamp','--mappingCacheMemoryLimit',\n '-w','--maxReadOcc','--noEffectiveLengthCorrection','--noFragLengthDist',\n '-v','--useVBOpt','--rangeFactorizationBins','--perTranscriptPrior','--vbPrior',\n '--numErrorBins','--numBiasSamples','--numPreAuxModelSamples','--numAuxModelSamples',\n '-s','--sampleOut','-u','--sampleUnaligned','--numGibbsSamples','--numBootstraps',\n '--thinningFactor']\n ##salmon quantmerge\n self.validArgsQuantMerge=['--quants','--names','-c','--column','-o','--output']\n\n self.valid_args=pu.get_union(self.validArgsIndex,self.validArgsQuantReads,self.validArgsQuantAlign,self.validArgsQuantMerge)\n \"\"\"\n \n if not threads:\n threads=os.cpu_count()\n \n self.threads=threads\n \n #if index is passed, update the passed arguments\n if len(salmon_index)>0 and pu.check_salmon_index(salmon_index):\n 
print(\"salmon index is: \"+salmon_index)\n self.salmon_index=salmon_index\n else:\n print(\"No salmon index provided. Please build index now to generate an index...\")\n \n \n \n def build_index(self,index_path,index_name,fasta,threads=None,verbose=False,quiet=False,logs=True,objectid=\"NA\",**kwargs):\n \"\"\"\n build salmon index and store the path to index in self\n \n index_path: str\n path to the output directory\n index_name: str\n index name\n fasta: str\n Path to fasta file\n threads: int\n Number of threads\n verbose: bool\n Print stdout and std error\n quiet: bool\n Print nothing\n logs: bool\n Log this command to pyrpipe logs\n objectid: str\n Provide an id to attach with this command e.g. the SRR accession. This is useful for debugging, benchmarking and reports.\n kwargs: dict\n Options to pass to salmon. This will override the existing options\n \n :return: status of salmon index\n :rtype: bool\n \"\"\"\n \n #check input\n if not pu.check_files_exist(fasta):\n pu.print_boldred(\"{} does not exist. Exiting\".format(fasta))\n return False\n #create out dir\n if not pu.check_paths_exist(index_path):\n if not pu.mkdir(index_path):\n print(\"ERROR in building hisat2 index. Failed to create index directory.\")\n return False\n indexOut=os.path.join(index_path,index_name)\n \n if not threads:\n threads=self.threads\n \n newOpts={\"--threads\":str(threads),\"-t\":fasta,\"-i\":indexOut}\n \n mergedOpts={**newOpts,**kwargs}\n \n #call salmon\n status=self.run_salmon(\"index\",verbose=verbose,quiet=quiet,logs=logs,objectid=objectid,**mergedOpts)\n \n if status:\n #check if sam file is present in the location directory of sra_object\n #if check_files_exist(os.path.join(indexOut,\"versionInfo.json\")): #not sure if this is reliable\n if pu.check_paths_exist(indexOut):\n self.salmon_index=indexOut\n pu.print_green(\"salmon index is:\"+self.salmon_index)\n return True\n \n pu.print_boldred(\"Failed to create salmon index\")\n return False\n \n \n \n def perform_quant(self,sra_object,out_dir=\"\",lib_type=None,threads=None,verbose=False,quiet=False,logs=True,objectid=\"NA\",**kwargs):\n \"\"\"run salmon quant\n sra_object: SRA\n An SRA object with valid fastq files\n lib_type: str\n Library type. Default:A\n threads: int\n Num threads to use\n verbose: bool\n Print stdout and std error\n quiet: bool\n Print nothing\n logs: bool\n Log this command to pyrpipe logs\n objectid: str\n Provide an id to attach with this command e.g. the SRR accession. This is useful for debugging, benchmarking and reports.\n kwargs: dict\n Options to pass to salmon. 
This will override the existing options\n\n :return: Path to salmon out directory\n :rtype: string\n \"\"\"\n if not lib_type:\n lib_type=\"A\"\n \n if not out_dir:\n out_dir=os.path.join(sra_object.location,\"salmon_out\")\n \n if not threads:\n threads=self.threads\n \n if sra_object.layout == 'PAIRED':\n newOpts={\"--threads\":str(threads),\"-o\":out_dir,\"-l\":lib_type,\"-1\":sra_object.localfastq1Path,\"-2\":sra_object.localfastq2Path,\"-i\":self.salmon_index}\n else:\n newOpts={\"--threads\":str(threads),\"-o\":out_dir,\"-l\":lib_type,\"-r\":sra_object.localfastqPath,\"-i\":self.salmon_index}\n \n \n #add input files to kwargs, overwrite newOpts with kwargs\n mergedOpts={**newOpts,**kwargs}\n \n #call salmon\n status=self.run_salmon(\"quant\",verbose=verbose,quiet=quiet,logs=logs,objectid=sra_object.srr_accession,**mergedOpts)\n \n if status:\n #check if sam file is present in the location directory of sra_object\n if pu.check_files_exist(os.path.join(out_dir,\"quant.sf\")):\n return out_dir\n \n pu.print_boldred(\"salmon quant failed\")\n return \"\"\n \n \n \n def run_salmon(self,subcommand,valid_args=None,verbose=False,quiet=False,logs=True,objectid=\"NA\",**kwargs):\n \"\"\"Wrapper for running salmon.\n \n Parameters\n ----------\n \n subcommand: str\n subcommand for salmon\n valid_args: list\n List of valid arguments\n verbose: bool\n Print stdout and std error\n quiet: bool\n Print nothing\n logs: bool\n Log this command to pyrpipe logs\n objectid: str\n Provide an id to attach with this command e.g. the SRR accession. This is useful for debugging, benchmarking and reports.\n kwargs: dict\n Options to pass to salmon. This will override the existing options\n\n :return: Returns the status of salmon. True is passed, False if failed.\n :rtype: bool\n \"\"\"\n \n #check for a valid index\n if subcommand!=\"index\":\n if not self.check_index():\n raise Exception(\"ERROR: Invalid salmon index. 
Please run build index to generate an index.\")\n        \n        \n        salmon_Cmd=['salmon',subcommand]\n        salmon_Cmd.extend(pu.parse_unix_args(valid_args,kwargs))\n        \n        #start execution\n        status=pe.execute_command(salmon_Cmd,verbose=verbose,quiet=quiet,logs=logs,objectid=objectid,command_name=\" \".join(salmon_Cmd[0:2]))\n        if not status:\n            pu.print_boldred(\"salmon failed\")\n        return status \n\n    def check_index(self):\n        if hasattr(self,'salmon_index'):\n            return pu.check_salmon_index(self.salmon_index)  # same helper the constructor uses\n        return False\n    \n    \n","sub_path":"pyrpipe/quant.py","file_name":"quant.py","file_ext":"py","file_size_in_byte":18656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"536201713","text":"import logging, sys\n\nlogger = logging.getLogger(__name__)\n# set level\nlogger.setLevel(logging.DEBUG)\n# create a console handler\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\n\n# create a formatter\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n# add a formatter to ch\nch.setFormatter(formatter)\n\n# add ch to logger\nlogger.addHandler(ch)\n\n# application code\nlogger.debug('debug message')\nlogger.info('info message')\nlogger.warning('warning message')\nlogger.error('error message')\nlogger.critical('critical message')\n","sub_path":"Advanced/ScratchLogger.py","file_name":"ScratchLogger.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"558024406","text":"import csv\nimport os\nimport numpy\n\n'''\nINTRODUCTION\n============\n\nBen Gelman\nMIT\nUser file construction\n\nThis script will create a file for each user in the feature-extracted\ndatabase. Within each user file, there will be a value for every feature\nfor every week of the course. Default values will be inserted in order to \nensure there are no missing values for any user or feature. Essentially,\neach user will be represented as a matrix of feature values over time.\n\n\nIMPORTANT USE INFORMATION: MUST READ\n====================================\n\n* Your feature-extracted database MUST be sorted by longitudinal_feature_id,\n  from lowest to highest. This is done automatically by feature extraction\n  scripts, but results will be incorrect if unsorted.\n\n* This script requires numpy to efficiently filter users that have 0 for all\n  feature values. \n\n* Fill in the script inputs directly below\n'''\n\n#number of weeks in the course. Watch out for ZERO INDEXING. If num_weeks\n#is set to 15, that will process week 0 through week 14. \nnum_weeks = 15\n#This is the name of the csv file holding the output of feature extraction\ncsvname = \"test_6002x_fall_2012.csv\"\n#This is the name of the OUTPUT DIRECTORY. File names will have features \n#appended to this value.\noutput_suffix = \"6002x_fall_2012\"\n#Change this to True if you WANT TO FILTER ZEROED USERS.\n#these are users who have zero for ALL their feature values.\nzero_users = True\n\n'''\nmain file io\nThis function has to take each user id and create a list of lists for it.\nEach inner list is a row of the user_matrix and it has to contain the values\nfor each feature. These lists need to be constructed in the order of feature\nids created by feature extraction (simple numerical ascending sort).\nIn order to achieve this, we have to build up every user's list at the same\ntime, starting from the first feature. 
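\n\nFor example (purely illustrative numbers): with num_weeks = 3 and two\nfeatures, every user starts out as\n\n    [[0, 0, 0],   # first feature id, weeks 0-2\n     [0, 0, 0]]   # second feature id, weeks 0-2\n\nand only the (feature, week) cells that actually appear in the input csv\nare overwritten with real values.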
\n'''\ndef file_parse():\n\n\t#some data holding variables we'll need for later\n\tuser_set = set()\n\tuser_dict = {}\n\tfeature_id_list = []\n\n\t#create the reader which will parse the feature-extracted database\n\tcsvfile = open(csvname,'rb')\n\treader = csv.reader(csvfile,dialect='excel')\n\t#remove the header.\n\treader.next()\n\n\t#unfortunately, we require a pass through the data to make sure that we \n\t#have every unique possible user and every unique feature_id. The problem\n\t#is that not every user has an entry for every feature, so we have to make\n\t#sure that every feature table has every user. Thus, we have to keep track.\n\tfor row in reader:\n\t\tif row[2] not in user_dict.keys():\n\t\t\tuser_set.add(row[2])\n\t\tif row[1] not in feature_id_list:\n\t\t\tfeature_id_list.append(row[1])\n\n\t#now for each user, we have to create a list of lists. The outer list\n\t#should hold M lists, where M is the number of features. The inner lists\n\t#should hold N entries, where N is the number of weeks in the course.\n\tfor user in user_set:\n\t\tuser_dict[user] = [([0]*num_weeks) for i in range(len(feature_id_list))]\n\tuser_set.clear()\n\n\t#return back to the top of the file and skip the header.\n\tcsvfile.seek(0)\n\treader.next()\n\n\t#with the user dict set up completely with default values, we can now\n\t#go through the input csv and actually populate the dict with \n\t#existing values.\n\tfor row in reader:\n\n\t\t#first, we have to check if the feature week is outside the bounds\n\t\t#of the class. We are removing these so that every student's feature\n\t\t#vector is the same length.\n\t\tif (int(row[3]) < 0) or (int(row[3]) > (num_weeks-1)):\n\t\t\tcontinue\n\n\t\t#otherwise, this is a valid row. So we index into the dictionary with\n\t\t#the user id, and choose the correct list from the list of lists\n\t\t#by getting the index of the feature from feature_id_list.\n\t\t#then we index into THAT list with the week number. \n\t\tuser_dict[row[2]][feature_id_list.index(row[1])][int(row[3])] = row[4]\n\n\t#once we are out of the for loop, our user_dict should be completely\n\t#populated with the existing values, and all other values defaulted.\n\tcsvfile.close()\n\twrite_users(user_dict,feature_id_list)\n\n'''\nDoes all the writing to output files.\n'''\ndef write_users(user_dict,feature_id_list):\n\n\t#first create the folder we will write to (check the same path we create)\n\tif not os.path.exists(\"u_\" + output_suffix):\n\t\tos.mkdir(\"u_\" + output_suffix)\n\n\t#each user gets a file\n\tfor user in user_dict.keys():\n\n\t\tcsvout = open(\"./u_\"+output_suffix+\"/user_\"+user+\"_\"+output_suffix+\".csv\",'wb')\n\t\twriter = csv.writer(csvout, dialect=\"excel\")\n\n\t\t#write the header\n\t\theader = [\"feat_num\"]\n\t\tfor i in range(num_weeks):\n\t\t\theader.append(\"week\"+str(i))\n\t\twriter.writerow(header)\n\n\t\t#for each list in user_dict, retrieve the feature number from\n\t\t#the feature_id_list and add it on to the list of values. 
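\n\t\t#e.g. a finished row (with made-up values) looks like\n\t\t#[\"feature3\", 0, 2, 5, ...] -- the label plus one value per week.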
\n\t\ttemp_list = user_dict[user]\n\t\tfor i in range(len(temp_list)):\n\t\t\twrite_this = [\"feature\" + feature_id_list[i]] + temp_list[i]\n\t\t\twriter.writerow(write_this)\n\n\t\t#now the user is written, so we can close this csv file.\n\t\tcsvout.close()\n\n\n'''\nmain stuff\n'''\ndef main():\n\n\tfile_parse()\n\nmain()","sub_path":"user_file_creation.py","file_name":"user_file_creation.py","file_ext":"py","file_size_in_byte":4912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"281004689","text":"from django.contrib import messages\nfrom django.http import HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import render\nfrom django.template.loader import render_to_string\nfrom django.urls import reverse\nfrom django.views import View\nfrom django.views.generic import ListView\n\nfrom document.forms import CreateDocumentForm\nfrom document.models import Document\n\n\nclass HomeView(View):\n\n def get(self, request, *args, **kwargs):\n return render(request, 'base.html', context={})\n\n\nclass DocumentListView(ListView):\n template_name = 'document_list.html'\n paginate_by = 10\n\n def get_queryset(self):\n return Document.objects.filter(owner=self.request.user)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'Moja lista dokumentów'\n return context\n\n\ndef create_document(request):\n form = CreateDocumentForm(request.POST or None)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.owner = request.user\n instance.save()\n messages.info(request, f\"Dodano nowy dokument o nazwie: {form.cleaned_data['name']}\")\n return HttpResponseRedirect(reverse('document_list'))\n else:\n form = CreateDocumentForm()\n\n context = {\n 'form': form\n }\n\n return render(request, 'create_document.html', context)\n\n\ndef update_document(request, id):\n document = Document.objects.get(id=id)\n form = CreateDocumentForm(request.POST or None, instance=document)\n\n if request.method == 'POST':\n if form.is_valid():\n form.save()\n messages.info(request, f\"Zaktualizowano dokument o nazwie: {form.cleaned_data['name']}\")\n return HttpResponseRedirect(reverse('document_list'))\n else:\n form = CreateDocumentForm(instance=document)\n\n render_form = render_to_string('form_update_document.html', context={'form': form})\n\n context = {\n 'form': render_form,\n 'modal_title': f\"Edycja {document.name}\",\n 'object': document\n }\n\n html_form = render_to_string('ajax_update_object.html', context, request)\n\n return JsonResponse({'html_form': html_form})\n\n\ndef delete_document(request, id):\n document = Document.objects.get(id=id)\n\n if request.method == 'POST':\n document.delete()\n messages.error(request, f\"Usunięto dokument o nazwie: {document.name}\")\n return HttpResponseRedirect(reverse('document_list'))\n\n context = {\n 'modal_title': f\"Usuwanie {document.name}\",\n 'object': document,\n 'text': f\"Czy na pewno chcesz usunąć dokument {document.name}\"\n }\n\n html_form = render_to_string('ajax_delete_object.html', context, request)\n\n return JsonResponse({'html_form': html_form})\n","sub_path":"document/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"183045923","text":"import socket\nimport threading\nimport queue\nimport 
json\nip=''\nport=10086\nusers=[]  # holds all connected users\nque=queue.Queue()  # holds messages waiting to be broadcast\nlock=threading.Lock()  # prevents errors when several threads put messages concurrently\n\ndef receiveMessage(conn,addr):\n    print(\"A client connected:\",conn,addr)\n    users.append(conn)\n    try:\n        while True:\n            data = conn.recv(1024)\n            data = data.decode()\n            deposit(addr, data)\n    except Exception as e:\n        deleteUsers(conn)\n        conn.close()\n\ndef sendMessage():\n    while True:\n        if not que.empty():\n            data = que.get()\n            from1 = data[0]\n            data = list(data)\n            data = json.dumps(data)\n            print(type(data))\n            for c in users:\n                c.send(data.encode())\n\ndef deposit(addr, data):\n    try:\n        lock.acquire()\n        que.put((addr, data))\n    finally:\n        lock.release()\n\ndef deleteUsers(conn):\n    a = 0\n    for i in users:\n        if i == conn:\n            users.pop(a)\n            print(\"Remaining online users:\",users)\n            return\n        a += 1\n\ndef main():\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    s.bind((ip, port))\n    s.listen()\n    send = threading.Thread(target=sendMessage)\n    send.start()\n\n    while True:\n        conn, addr = s.accept()\n        r = threading.Thread(target=receiveMessage, args=(conn, addr))\n        r.start()\n\nmain()\n","sub_path":"pra/p4/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"23343731","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/hedley/django/instances/mote/mote/__init__.py\n# Compiled at: 2017-04-24 04:31:00\n# Size of source mod 2**32: 83 bytes\n__version__ = '0.3'\ndefault_app_config = 'mote.apps.MoteConfig'\nPROJECT_PATHS = {}","sub_path":"pycfiles/mote_prk-0.3.1-py2.7/__init__.cpython-35.py","file_name":"__init__.cpython-35.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"559411381","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Common Carla code\n# Copyright (C) 2011-2015 Filipe Coelho \n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as\n# published by the Free Software Foundation; either version 2 of\n# the License, or any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# For a full copy of the GNU General Public License see the doc/GPL.txt file.\n\n# ------------------------------------------------------------------------------------------------------------\n# Imports (Config)\n\nfrom carla_config import *\n\n# These will be modified during install\nX_LIBDIR_X = None\nX_DATADIR_X = None\n\n# ------------------------------------------------------------------------------------------------------------\n# Imports (Global)\n\nimport os\nimport sys\n\nif config_UseQt5:\n from PyQt5.Qt import PYQT_VERSION_STR\n from PyQt5.QtCore import qFatal, qVersion, qWarning, QDir\n from PyQt5.QtGui import QIcon\n from PyQt5.QtWidgets import QFileDialog, QMessageBox\nelse:\n from PyQt4.Qt import PYQT_VERSION_STR\n from PyQt4.QtCore import qFatal, qVersion, qWarning, QDir\n from PyQt4.QtGui import QFileDialog, QIcon, QMessageBox\n\n# ------------------------------------------------------------------------------------------------------------\n# Import Signal\n\nfrom signal import signal, SIGINT, SIGTERM\n\ntry:\n from signal import SIGUSR1\n haveSIGUSR1 = True\nexcept:\n haveSIGUSR1 = False\n\n# ------------------------------------------------------------------------------------------------------------\n# Imports (Custom)\n\nfrom carla_backend_qt import *\n\n# ------------------------------------------------------------------------------------------------------------\n# Platform specific stuff\n\nif WINDOWS:\n WINDIR = os.getenv(\"WINDIR\")\n\n# ------------------------------------------------------------------------------------------------------------\n# Set Version\n\nVERSION = \"1.9.6 (2.0-beta4)\"\n\n# ------------------------------------------------------------------------------------------------------------\n# Set TMP\n\nenvTMP = os.getenv(\"TMP\")\n\nif envTMP is None:\n if WINDOWS:\n qWarning(\"TMP variable not set\")\n TMP = QDir.tempPath()\nelse:\n TMP = envTMP\n\nif not os.path.exists(TMP):\n qWarning(\"TMP does not exist\")\n TMP = \"/\"\n\ndel envTMP\n\n# ------------------------------------------------------------------------------------------------------------\n# Set HOME\n\nenvHOME = os.getenv(\"HOME\")\n\nif envHOME is None:\n if LINUX or MACOS:\n qWarning(\"HOME variable not set\")\n HOME = QDir.toNativeSeparators(QDir.homePath())\nelse:\n HOME = envHOME\n\nif not os.path.exists(HOME):\n qWarning(\"HOME does not exist\")\n HOME = TMP\n\ndel envHOME\n\n# ------------------------------------------------------------------------------------------------------------\n# Set PATH\n\nenvPATH = os.getenv(\"PATH\")\n\nif envPATH is None:\n qWarning(\"PATH variable not set\")\n if MACOS:\n PATH = (\"/opt/local/bin\", \"/usr/local/bin\", \"/usr/bin\", \"/bin\")\n elif WINDOWS:\n PATH = (os.path.join(WINDIR, \"system32\"), WINDIR)\n else:\n PATH = (\"/usr/local/bin\", \"/usr/bin\", \"/bin\")\nelse:\n PATH = envPATH.split(os.pathsep)\n\ndel envPATH\n\n# ------------------------------------------------------------------------------------------------------------\n# Static MIDI CC list\n\nMIDI_CC_LIST = (\n \"0x01 Modulation\",\n \"0x02 Breath\",\n \"0x03 (Undefined)\",\n \"0x04 Foot\",\n \"0x05 Portamento\",\n \"0x07 Volume\",\n \"0x08 Balance\",\n \"0x09 (Undefined)\",\n \"0x0A Pan\",\n \"0x0B Expression\",\n \"0x0C FX Control 1\",\n \"0x0D FX Control 2\",\n \"0x0E (Undefined)\",\n \"0x0F (Undefined)\",\n \"0x10 General Purpose 1\",\n \"0x11 General Purpose 2\",\n \"0x12 General Purpose 3\",\n \"0x13 General 
Purpose 4\",\n \"0x14 (Undefined)\",\n \"0x15 (Undefined)\",\n \"0x16 (Undefined)\",\n \"0x17 (Undefined)\",\n \"0x18 (Undefined)\",\n \"0x19 (Undefined)\",\n \"0x1A (Undefined)\",\n \"0x1B (Undefined)\",\n \"0x1C (Undefined)\",\n \"0x1D (Undefined)\",\n \"0x1E (Undefined)\",\n \"0x1F (Undefined)\",\n \"0x46 Control 1 [Variation]\",\n \"0x47 Control 2 [Timbre]\",\n \"0x48 Control 3 [Release]\",\n \"0x49 Control 4 [Attack]\",\n \"0x4A Control 5 [Brightness]\",\n \"0x4B Control 6 [Decay]\",\n \"0x4C Control 7 [Vib Rate]\",\n \"0x4D Control 8 [Vib Depth]\",\n \"0x4E Control 9 [Vib Delay]\",\n \"0x4F Control 10 [Undefined]\",\n \"0x50 General Purpose 5\",\n \"0x51 General Purpose 6\",\n \"0x52 General Purpose 7\",\n \"0x53 General Purpose 8\",\n \"0x54 Portamento Control\",\n \"0x5B FX 1 Depth [Reverb]\",\n \"0x5C FX 2 Depth [Tremolo]\",\n \"0x5D FX 3 Depth [Chorus]\",\n \"0x5E FX 4 Depth [Detune]\",\n \"0x5F FX 5 Depth [Phaser]\"\n)\n\n# ------------------------------------------------------------------------------------------------------------\n# PatchCanvas defines\n\nCANVAS_ANTIALIASING_SMALL = 1\nCANVAS_EYECANDY_SMALL = 1\n\n# ------------------------------------------------------------------------------------------------------------\n# Carla Settings keys\n\nCARLA_KEY_MAIN_PROJECT_FOLDER = \"Main/ProjectFolder\" # str\nCARLA_KEY_MAIN_USE_PRO_THEME = \"Main/UseProTheme\" # bool\nCARLA_KEY_MAIN_PRO_THEME_COLOR = \"Main/ProThemeColor\" # str\nCARLA_KEY_MAIN_REFRESH_INTERVAL = \"Main/RefreshInterval\" # int\nCARLA_KEY_MAIN_USE_CUSTOM_SKINS = \"Main/UseCustomSkins\" # bool\n\nCARLA_KEY_CANVAS_THEME = \"Canvas/Theme\" # str\nCARLA_KEY_CANVAS_SIZE = \"Canvas/Size\" # str \"NxN\"\nCARLA_KEY_CANVAS_USE_BEZIER_LINES = \"Canvas/UseBezierLines\" # bool\nCARLA_KEY_CANVAS_AUTO_HIDE_GROUPS = \"Canvas/AutoHideGroups\" # bool\nCARLA_KEY_CANVAS_AUTO_SELECT_ITEMS = \"Canvas/AutoSelectItems\" # bool\nCARLA_KEY_CANVAS_EYE_CANDY = \"Canvas/EyeCandy\" # enum\nCARLA_KEY_CANVAS_USE_OPENGL = \"Canvas/UseOpenGL\" # bool\nCARLA_KEY_CANVAS_ANTIALIASING = \"Canvas/Antialiasing\" # enum\nCARLA_KEY_CANVAS_HQ_ANTIALIASING = \"Canvas/HQAntialiasing\" # bool\n\nCARLA_KEY_ENGINE_DRIVER_PREFIX = \"Engine/Driver-\"\nCARLA_KEY_ENGINE_AUDIO_DRIVER = \"Engine/AudioDriver\" # str\nCARLA_KEY_ENGINE_PROCESS_MODE = \"Engine/ProcessMode\" # enum\nCARLA_KEY_ENGINE_TRANSPORT_MODE = \"Engine/TransportMode\" # enum\nCARLA_KEY_ENGINE_FORCE_STEREO = \"Engine/ForceStereo\" # bool\nCARLA_KEY_ENGINE_PREFER_PLUGIN_BRIDGES = \"Engine/PreferPluginBridges\" # bool\nCARLA_KEY_ENGINE_PREFER_UI_BRIDGES = \"Engine/PreferUiBridges\" # bool\nCARLA_KEY_ENGINE_UIS_ALWAYS_ON_TOP = \"Engine/UIsAlwaysOnTop\" # bool\nCARLA_KEY_ENGINE_MAX_PARAMETERS = \"Engine/MaxParameters\" # int\nCARLA_KEY_ENGINE_UI_BRIDGES_TIMEOUT = \"Engine/UiBridgesTimeout\" # int\n\nCARLA_KEY_PATHS_LADSPA = \"Paths/LADSPA\"\nCARLA_KEY_PATHS_DSSI = \"Paths/DSSI\"\nCARLA_KEY_PATHS_LV2 = \"Paths/LV2\"\nCARLA_KEY_PATHS_VST2 = \"Paths/VST2\"\nCARLA_KEY_PATHS_VST3 = \"Paths/VST3\"\nCARLA_KEY_PATHS_GIG = \"Paths/GIG\"\nCARLA_KEY_PATHS_SF2 = \"Paths/SF2\"\nCARLA_KEY_PATHS_SFZ = \"Paths/SFZ\"\n\n# if pro theme is on and color is black\nCARLA_KEY_CUSTOM_PAINTING = \"UseCustomPainting\" # bool\n\n# ------------------------------------------------------------------------------------------------------------\n# Carla Settings defaults\n\n# Main\nCARLA_DEFAULT_MAIN_PROJECT_FOLDER = HOME\nCARLA_DEFAULT_MAIN_USE_PRO_THEME = True\nCARLA_DEFAULT_MAIN_PRO_THEME_COLOR = 
\"Black\"\nCARLA_DEFAULT_MAIN_REFRESH_INTERVAL = 20\nCARLA_DEFAULT_MAIN_USE_CUSTOM_SKINS = True\n\n# Canvas\nCARLA_DEFAULT_CANVAS_THEME = \"Modern Dark\"\nCARLA_DEFAULT_CANVAS_SIZE = \"3100x2400\"\nCARLA_DEFAULT_CANVAS_SIZE_WIDTH = 3100\nCARLA_DEFAULT_CANVAS_SIZE_HEIGHT = 2400\nCARLA_DEFAULT_CANVAS_USE_BEZIER_LINES = True\nCARLA_DEFAULT_CANVAS_AUTO_HIDE_GROUPS = True\nCARLA_DEFAULT_CANVAS_AUTO_SELECT_ITEMS = False\nCARLA_DEFAULT_CANVAS_EYE_CANDY = CANVAS_EYECANDY_SMALL\nCARLA_DEFAULT_CANVAS_USE_OPENGL = False\nCARLA_DEFAULT_CANVAS_ANTIALIASING = CANVAS_ANTIALIASING_SMALL\nCARLA_DEFAULT_CANVAS_HQ_ANTIALIASING = False\n\n# Engine\nCARLA_DEFAULT_FORCE_STEREO = False\nCARLA_DEFAULT_PREFER_PLUGIN_BRIDGES = False\nCARLA_DEFAULT_PREFER_UI_BRIDGES = bool(not WINDOWS)\nCARLA_DEFAULT_UIS_ALWAYS_ON_TOP = False\nCARLA_DEFAULT_MAX_PARAMETERS = MAX_DEFAULT_PARAMETERS\nCARLA_DEFAULT_UI_BRIDGES_TIMEOUT = 4000\n\nCARLA_DEFAULT_AUDIO_NUM_PERIODS = 2\nCARLA_DEFAULT_AUDIO_BUFFER_SIZE = 512\nCARLA_DEFAULT_AUDIO_SAMPLE_RATE = 44100\n\nif WINDOWS:\n CARLA_DEFAULT_AUDIO_DRIVER = \"DirectSound\"\nelif MACOS:\n CARLA_DEFAULT_AUDIO_DRIVER = \"CoreAudio\"\nelse:\n CARLA_DEFAULT_AUDIO_DRIVER = \"JACK\"\n\nif LINUX:\n CARLA_DEFAULT_PROCESS_MODE = ENGINE_PROCESS_MODE_MULTIPLE_CLIENTS\n CARLA_DEFAULT_TRANSPORT_MODE = ENGINE_TRANSPORT_MODE_JACK\nelse:\n CARLA_DEFAULT_PROCESS_MODE = ENGINE_PROCESS_MODE_PATCHBAY\n CARLA_DEFAULT_TRANSPORT_MODE = ENGINE_TRANSPORT_MODE_INTERNAL\n\n# ------------------------------------------------------------------------------------------------------------\n# Default Plugin Folders (get)\n\nDEFAULT_LADSPA_PATH = \"\"\nDEFAULT_DSSI_PATH = \"\"\nDEFAULT_LV2_PATH = \"\"\nDEFAULT_VST2_PATH = \"\"\nDEFAULT_VST3_PATH = \"\"\nDEFAULT_GIG_PATH = \"\"\nDEFAULT_SF2_PATH = \"\"\nDEFAULT_SFZ_PATH = \"\"\n\nif WINDOWS:\n splitter = \";\"\n\n APPDATA = os.getenv(\"APPDATA\")\n PROGRAMFILES = os.getenv(\"PROGRAMFILES\")\n PROGRAMFILESx86 = os.getenv(\"PROGRAMFILES(x86)\")\n COMMONPROGRAMFILES = os.getenv(\"COMMONPROGRAMFILES\")\n COMMONPROGRAMFILESx86 = os.getenv(\"COMMONPROGRAMFILES(x86)\")\n\n # Small integrity tests\n if not APPDATA:\n qFatal(\"APPDATA variable not set, cannot continue\")\n sys.exit(1)\n\n if not PROGRAMFILES:\n qFatal(\"PROGRAMFILES variable not set, cannot continue\")\n sys.exit(1)\n\n if not COMMONPROGRAMFILES:\n qFatal(\"COMMONPROGRAMFILES variable not set, cannot continue\")\n sys.exit(1)\n\n DEFAULT_LADSPA_PATH = APPDATA + \"\\\\LADSPA\"\n DEFAULT_LADSPA_PATH += \";\" + PROGRAMFILES + \"\\\\LADSPA\"\n\n DEFAULT_DSSI_PATH = APPDATA + \"\\\\DSSI\"\n DEFAULT_DSSI_PATH += \";\" + PROGRAMFILES + \"\\\\DSSI\"\n\n DEFAULT_LV2_PATH = APPDATA + \"\\\\LV2\"\n DEFAULT_LV2_PATH += \";\" + COMMONPROGRAMFILES + \"\\\\LV2\"\n\n DEFAULT_VST2_PATH = PROGRAMFILES + \"\\\\VstPlugins\"\n DEFAULT_VST2_PATH += \";\" + PROGRAMFILES + \"\\\\Steinberg\\\\VstPlugins\"\n\n if kIs64bit:\n DEFAULT_VST2_PATH += \";\" + COMMONPROGRAMFILES + \"\\\\VST2\"\n\n DEFAULT_VST3_PATH = COMMONPROGRAMFILES + \"\\\\VST3\"\n\n DEFAULT_GIG_PATH = APPDATA + \"\\\\GIG\"\n DEFAULT_SF2_PATH = APPDATA + \"\\\\SF2\"\n DEFAULT_SFZ_PATH = APPDATA + \"\\\\SFZ\"\n\n if PROGRAMFILESx86:\n DEFAULT_LADSPA_PATH += \";\" + PROGRAMFILESx86 + \"\\\\LADSPA\"\n DEFAULT_DSSI_PATH += \";\" + PROGRAMFILESx86 + \"\\\\DSSI\"\n DEFAULT_VST2_PATH += \";\" + PROGRAMFILESx86 + \"\\\\VstPlugins\"\n DEFAULT_VST2_PATH += \";\" + PROGRAMFILESx86 + \"\\\\Steinberg\\\\VstPlugins\"\n\n if COMMONPROGRAMFILESx86:\n DEFAULT_VST3_PATH += COMMONPROGRAMFILESx86 + 
\"\\\\VST3\"\n\nelif HAIKU:\n splitter = \":\"\n\n DEFAULT_LADSPA_PATH = HOME + \"/.ladspa\"\n DEFAULT_LADSPA_PATH += \":/boot/common/add-ons/ladspa\"\n\n DEFAULT_DSSI_PATH = HOME + \"/.dssi\"\n DEFAULT_DSSI_PATH += \":/boot/common/add-ons/dssi\"\n\n DEFAULT_LV2_PATH = HOME + \"/.lv2\"\n DEFAULT_LV2_PATH += \":/boot/common/add-ons/lv2\"\n\n DEFAULT_VST2_PATH = HOME + \"/.vst\"\n DEFAULT_VST2_PATH += \":/boot/common/add-ons/vst\"\n\n DEFAULT_VST3_PATH = HOME + \"/.vst3\"\n DEFAULT_VST3_PATH += \":/boot/common/add-ons/vst3\"\n\nelif MACOS:\n splitter = \":\"\n\n DEFAULT_LADSPA_PATH = HOME + \"/Library/Audio/Plug-Ins/LADSPA\"\n DEFAULT_LADSPA_PATH += \":/Library/Audio/Plug-Ins/LADSPA\"\n\n DEFAULT_DSSI_PATH = HOME + \"/Library/Audio/Plug-Ins/DSSI\"\n DEFAULT_DSSI_PATH += \":/Library/Audio/Plug-Ins/DSSI\"\n\n DEFAULT_LV2_PATH = HOME + \"/Library/Audio/Plug-Ins/LV2\"\n DEFAULT_LV2_PATH += \":/Library/Audio/Plug-Ins/LV2\"\n\n DEFAULT_VST2_PATH = HOME + \"/Library/Audio/Plug-Ins/VST\"\n DEFAULT_VST2_PATH += \":/Library/Audio/Plug-Ins/VST\"\n\n DEFAULT_VST3_PATH = HOME + \"/Library/Audio/Plug-Ins/VST3\"\n DEFAULT_VST3_PATH += \":/Library/Audio/Plug-Ins/VST3\"\n\nelse:\n splitter = \":\"\n\n DEFAULT_LADSPA_PATH = HOME + \"/.ladspa\"\n DEFAULT_LADSPA_PATH += \":/usr/lib/ladspa\"\n DEFAULT_LADSPA_PATH += \":/usr/local/lib/ladspa\"\n\n DEFAULT_DSSI_PATH = HOME + \"/.dssi\"\n DEFAULT_DSSI_PATH += \":/usr/lib/dssi\"\n DEFAULT_DSSI_PATH += \":/usr/local/lib/dssi\"\n\n DEFAULT_LV2_PATH = HOME + \"/.lv2\"\n DEFAULT_LV2_PATH += \":/usr/lib/lv2\"\n DEFAULT_LV2_PATH += \":/usr/local/lib/lv2\"\n\n DEFAULT_VST2_PATH = HOME + \"/.vst\"\n DEFAULT_VST2_PATH += \":/usr/lib/vst\"\n DEFAULT_VST2_PATH += \":/usr/local/lib/vst\"\n\n DEFAULT_VST3_PATH = HOME + \"/.vst3\"\n DEFAULT_VST3_PATH += \":/usr/lib/vst3\"\n DEFAULT_VST3_PATH += \":/usr/local/lib/vst3\"\n\n DEFAULT_GIG_PATH = HOME + \"/.sounds/gig\"\n DEFAULT_GIG_PATH += \":/usr/share/sounds/gig\"\n\n DEFAULT_SF2_PATH = HOME + \"/.sounds/sf2\"\n DEFAULT_SF2_PATH += \":/usr/share/sounds/sf2\"\n\n DEFAULT_SFZ_PATH = HOME + \"/.sounds/sfz\"\n DEFAULT_SFZ_PATH += \":/usr/share/sounds/sfz\"\n\nif not WINDOWS:\n winePrefix = os.getenv(\"WINEPREFIX\")\n\n if not winePrefix:\n winePrefix = HOME + \"/.wine\"\n\n if os.path.exists(winePrefix):\n DEFAULT_VST2_PATH += \":\" + winePrefix + \"/drive_c/Program Files/VstPlugins\"\n DEFAULT_VST3_PATH += \":\" + winePrefix + \"/drive_c/Program Files/Common Files/VST3\"\n\n if kIs64bit and os.path.exists(winePrefix + \"/drive_c/Program Files (x86)\"):\n DEFAULT_VST2_PATH += \":\" + winePrefix + \"/drive_c/Program Files (x86)/VstPlugins\"\n DEFAULT_VST3_PATH += \":\" + winePrefix + \"/drive_c/Program Files (x86)/Common Files/VST3\"\n\n del winePrefix\n\n# ------------------------------------------------------------------------------------------------------------\n# Default Plugin Folders (set)\n\nreadEnvVars = True\n\nif WINDOWS:\n # Check if running Wine. 
If yes, ignore env vars\n from winreg import ConnectRegistry, OpenKey, CloseKey, HKEY_CURRENT_USER\n reg = ConnectRegistry(None, HKEY_CURRENT_USER)\n\n try:\n key = OpenKey(reg, r\"SOFTWARE\\Wine\")\n CloseKey(key)\n del key\n readEnvVars = False\n except:\n pass\n\n CloseKey(reg)\n del reg\n\nif readEnvVars:\n CARLA_DEFAULT_LADSPA_PATH = os.getenv(\"LADSPA_PATH\", DEFAULT_LADSPA_PATH).split(splitter)\n CARLA_DEFAULT_DSSI_PATH = os.getenv(\"DSSI_PATH\", DEFAULT_DSSI_PATH).split(splitter)\n CARLA_DEFAULT_LV2_PATH = os.getenv(\"LV2_PATH\", DEFAULT_LV2_PATH).split(splitter)\n CARLA_DEFAULT_VST2_PATH = os.getenv(\"VST_PATH\", DEFAULT_VST2_PATH).split(splitter)\n CARLA_DEFAULT_VST3_PATH = os.getenv(\"VST3_PATH\", DEFAULT_VST3_PATH).split(splitter)\n CARLA_DEFAULT_GIG_PATH = os.getenv(\"GIG_PATH\", DEFAULT_GIG_PATH).split(splitter)\n CARLA_DEFAULT_SF2_PATH = os.getenv(\"SF2_PATH\", DEFAULT_SF2_PATH).split(splitter)\n CARLA_DEFAULT_SFZ_PATH = os.getenv(\"SFZ_PATH\", DEFAULT_SFZ_PATH).split(splitter)\n\nelse:\n CARLA_DEFAULT_LADSPA_PATH = DEFAULT_LADSPA_PATH.split(splitter)\n CARLA_DEFAULT_DSSI_PATH = DEFAULT_DSSI_PATH.split(splitter)\n CARLA_DEFAULT_LV2_PATH = DEFAULT_LV2_PATH.split(splitter)\n CARLA_DEFAULT_VST2_PATH = DEFAULT_VST2_PATH.split(splitter)\n CARLA_DEFAULT_VST3_PATH = DEFAULT_VST3_PATH.split(splitter)\n CARLA_DEFAULT_GIG_PATH = DEFAULT_GIG_PATH.split(splitter)\n CARLA_DEFAULT_SF2_PATH = DEFAULT_SF2_PATH.split(splitter)\n CARLA_DEFAULT_SFZ_PATH = DEFAULT_SFZ_PATH.split(splitter)\n\n# ------------------------------------------------------------------------------------------------------------\n# Default Plugin Folders (cleanup)\n\ndel DEFAULT_LADSPA_PATH\ndel DEFAULT_DSSI_PATH\ndel DEFAULT_LV2_PATH\ndel DEFAULT_VST2_PATH\ndel DEFAULT_VST3_PATH\ndel DEFAULT_GIG_PATH\ndel DEFAULT_SF2_PATH\ndel DEFAULT_SFZ_PATH\n\n# ------------------------------------------------------------------------------------------------------------\n# Global Carla object\n\nclass CarlaObject(object):\n __slots__ = [\n 'gui', # Host Window\n 'nogui', # Skip UI\n 'term', # Terminated by OS signal\n 'utils' # Utils object\n ]\n\ngCarla = CarlaObject()\ngCarla.gui = None\ngCarla.nogui = False\ngCarla.term = False\ngCarla.utils = None\n\n# ------------------------------------------------------------------------------------------------------------\n# Set CWD\n\nCWD = sys.path[0]\n\nif not CWD:\n CWD = os.path.dirname(sys.argv[0])\n\n# make it work with cxfreeze\nif os.path.isfile(CWD):\n CWD = os.path.dirname(CWD)\n CXFREEZE = True\nelse:\n CXFREEZE = False\n\n# ------------------------------------------------------------------------------------------------------------\n# Set DLL_EXTENSION\n\nif WINDOWS:\n DLL_EXTENSION = \"dll\"\nelif MACOS:\n DLL_EXTENSION = \"dylib\"\nelse:\n DLL_EXTENSION = \"so\"\n\n# ------------------------------------------------------------------------------------------------------------\n# Check if a value is a number (float support)\n\ndef isNumber(value):\n try:\n float(value)\n return True\n except:\n return False\n\n# ------------------------------------------------------------------------------------------------------------\n# Convert a value to a list\n\ndef toList(value):\n if value is None:\n return []\n elif not isinstance(value, list):\n return [value]\n else:\n return value\n\n# ------------------------------------------------------------------------------------------------------------\n# Get Icon from user theme, using our own as backup (Oxygen)\n\ndef getIcon(icon, size = 16):\n 
return QIcon.fromTheme(icon, QIcon(\":/%ix%i/%s.png\" % (size, size, icon)))\n\n# ------------------------------------------------------------------------------------------------------------\n# Handle some basic command-line arguments shared between all carla variants\n\ndef handleInitialCommandLineArguments(file):\n initName = os.path.basename(file) if (file is not None and os.path.dirname(file) in PATH) else sys.argv[0]\n libPrefix = None\n\n for arg in sys.argv[1:]:\n if arg.startswith(\"--with-appname=\"):\n initName = os.path.basename(arg.replace(\"--with-appname=\", \"\"))\n\n elif arg.startswith(\"--with-libprefix=\"):\n libPrefix = arg.replace(\"--with-libprefix=\", \"\")\n\n elif arg == \"--gdb\":\n pass\n\n elif arg in (\"-n\", \"--n\", \"-no-gui\", \"--no-gui\", \"-nogui\", \"--nogui\"):\n gCarla.nogui = True\n\n elif arg in (\"-h\", \"--h\", \"-help\", \"--help\"):\n print(\"Usage: %s [OPTION]... [FILE|URL]\" % initName)\n print(\"\")\n print(\" where FILE can be a Carla project or preset file to be loaded, or URL if using Carla-Control\")\n print(\"\")\n print(\" and OPTION can be one or more of the following:\")\n print(\"\")\n print(\" --gdb \\t Run Carla inside gdb.\")\n print(\" -n,--no-gui \\t Run Carla headless, don't show UI.\")\n print(\"\")\n print(\" -h,--help \\t Print this help text and exit.\")\n print(\" -v,--version\\t Print version information and exit.\")\n print(\"\")\n\n sys.exit(0)\n\n elif arg in (\"-v\", \"--v\", \"-version\", \"--version\"):\n pathBinaries, pathResources = getPaths(libPrefix)\n\n print(\"Using Carla version %s\" % VERSION)\n print(\" Python version: %s\" % sys.version.split(\" \",1)[0])\n print(\" Qt version: %s\" % qVersion())\n print(\" PyQt version: %s\" % PYQT_VERSION_STR)\n print(\" Binary dir: %s\" % pathBinaries)\n print(\" Resources dir: %s\" % pathResources)\n\n sys.exit(0)\n\n return (initName, libPrefix)\n\n# ------------------------------------------------------------------------------------------------------------\n# Get initial project file (as passed in the command-line parameters)\n\ndef getInitialProjectFile(app, skipExistCheck = False):\n for arg in app.arguments()[1:]:\n if arg.startswith(\"--with-appname=\") or arg.startswith(\"--with-libprefix=\") or arg == \"--gdb\":\n continue\n if arg in (\"-n\", \"--n\", \"-no-gui\", \"--no-gui\", \"-nogui\", \"--nogui\"):\n continue\n if skipExistCheck or os.path.exists(arg):\n return arg\n\n return None\n\n# ------------------------------------------------------------------------------------------------------------\n# Get paths (binaries, resources)\n\ndef getPaths(libPrefix = None):\n CWDl = CWD.lower()\n\n # adjust for special distros\n libdir = os.path.basename(os.path.normpath(X_LIBDIR_X)) if X_LIBDIR_X else \"lib\"\n datadir = os.path.basename(os.path.normpath(X_DATADIR_X)) if X_DATADIR_X else \"share\"\n\n # standalone, installed system-wide linux\n if libPrefix is not None:\n pathBinaries = os.path.join(libPrefix, libdir, \"carla\")\n pathResources = os.path.join(libPrefix, datadir, \"carla\", \"resources\")\n\n # standalone, local source\n elif CWDl.endswith(\"source\"):\n pathBinaries = os.path.abspath(os.path.join(CWD, \"..\", \"bin\"))\n pathResources = os.path.join(pathBinaries, \"resources\")\n\n # plugin\n elif CWDl.endswith(\"resources\"):\n # installed system-wide linux\n if CWDl.endswith(\"/share/carla/resources\"):\n pathBinaries = os.path.abspath(os.path.join(CWD, \"..\", \"..\", \"..\", libdir, \"carla\"))\n pathResources = CWD\n\n # local source\n elif 
CWDl.endswith(\"native-plugins%sresources\" % os.sep):\n pathBinaries = os.path.abspath(os.path.join(CWD, \"..\", \"..\", \"..\", \"bin\"))\n pathResources = CWD\n\n # other\n else:\n pathBinaries = os.path.abspath(os.path.join(CWD, \"..\"))\n pathResources = CWD\n\n # everything else\n else:\n pathBinaries = CWD\n pathResources = os.path.join(pathBinaries, \"resources\")\n\n return (pathBinaries, pathResources)\n\n# ------------------------------------------------------------------------------------------------------------\n# Signal handler\n# TODO move to carla_host.py or something\n\ndef signalHandler(sig, frame):\n if sig in (SIGINT, SIGTERM):\n gCarla.term = True\n if gCarla.gui is not None:\n gCarla.gui.SIGTERM.emit()\n\n elif haveSIGUSR1 and sig == SIGUSR1:\n if gCarla.gui is not None:\n gCarla.gui.SIGUSR1.emit()\n\ndef setUpSignals():\n signal(SIGINT, signalHandler)\n signal(SIGTERM, signalHandler)\n\n if not haveSIGUSR1:\n return\n\n signal(SIGUSR1, signalHandler)\n\n# ------------------------------------------------------------------------------------------------------------\n# QLineEdit and QPushButton combo\n\ndef getAndSetPath(parent, lineEdit):\n newPath = QFileDialog.getExistingDirectory(parent, parent.tr(\"Set Path\"), lineEdit.text(), QFileDialog.ShowDirsOnly)\n if newPath:\n lineEdit.setText(newPath)\n return newPath\n\n# ------------------------------------------------------------------------------------------------------------\n# Custom MessageBox\n\ndef CustomMessageBox(parent, icon, title, text, extraText=\"\", buttons=QMessageBox.Yes|QMessageBox.No, defButton=QMessageBox.No):\n msgBox = QMessageBox(parent)\n msgBox.setIcon(icon)\n msgBox.setWindowTitle(title)\n msgBox.setText(text)\n msgBox.setInformativeText(extraText)\n msgBox.setStandardButtons(buttons)\n msgBox.setDefaultButton(defButton)\n return msgBox.exec_()\n\n# ------------------------------------------------------------------------------------------------------------\n","sub_path":"source/carla_shared.py","file_name":"carla_shared.py","file_ext":"py","file_size_in_byte":24046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"67449083","text":"# -*- coding: utf-8 -*-\n# @Author: Hongyu Chen\n# @Date: 2019-05-09 18:44:27\n# @Last Modified by: Hongyu Chen\n# @Last Modified time: 2019-05-09 18:52:48\n\n\nclass Solution:\n\tdef nextPermutation(self, nums):\n\t\t\"\"\"\n\t\tDo not return anything, modify nums in-place instead.\n\t\t\"\"\"\n\t\tif nums == []:\n\t\t\treturn\n\t\tif len(nums) == 1:\n\t\t\treturn\n\n\t\t# Scanning from the right, find the first number that is smaller than its successor; if there is none, the sequence is already the largest permutation\n\t\tfor i in range(len(nums)-2, -1, -1):\n\t\t\tif nums[i] < nums[i+1]:\n\t\t\t\tnonincrease = i\n\t\t\t\tbreak\n\t\t\t# Reversing the whole array yields the smallest permutation\n\t\t\tif i == 0:\n\t\t\t\tlb = 0\n\t\t\t\trb = len(nums) - 1\n\t\t\t\twhile lb < rb:\n\t\t\t\t\tnums[lb], nums[rb] = nums[rb], nums[lb]\n\t\t\t\t\tlb += 1\n\t\t\t\t\trb -= 1\n\t\t\t\treturn\n\n\t\t# Within [nonincrease+1, len(nums)-1], find the smallest number greater than nums[nonincrease]; if several equal candidates exist, take the last one in the sequence\n\t\t# This search always succeeds because nums[nonincrease + 1] > nums[nonincrease]\n\t\tminpos = nonincrease + 1\n\t\tfor i in range(nonincrease+1, len(nums)):\n\t\t\tif nums[i] > nums[nonincrease]:\n\t\t\t\tif nums[i] <= nums[minpos]:\n\t\t\t\t\tminpos = i\n\t\tnums[nonincrease], nums[minpos] = nums[minpos], nums[nonincrease]\n\n\t\t# Reverse nums[nonincrease+1, len(nums)-1]\n\t\tlb = nonincrease + 1\n\t\trb = len(nums) - 1\n\t\twhile lb < rb:\n\t\t\tnums[lb], nums[rb] = nums[rb], nums[lb]\n\t\t\tlb += 1\n\t\t\trb -= 
1\n\n","sub_path":"Leetcode/leetcode31 下一个排列.py","file_name":"leetcode31 下一个排列.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"160819352","text":"from zope.component import adapts\nfrom Products.Archetypes import PloneMessageFactory as _\n\nfrom Products.Archetypes.atapi import AnnotationStorage\nfrom Products.ATContentTypes.configuration import zconf\nfrom Products.ATContentTypes.content.event import ATEvent\nfrom Products.ATContentTypes.permission import ChangeEvents\n\nfrom raptus.multilanguagefields import widgets\nimport fields\n\nfrom base import DefaultExtender\n\nclass EventExtender(DefaultExtender):\n adapts(ATEvent)\n\n fields = DefaultExtender.fields + [\n fields.StringField('location',\n searchable=True,\n write_permission = ChangeEvents,\n widget = widgets.StringWidget(\n description = '',\n label = _(u'label_event_location', default=u'Event Location')\n )\n ),\n fields.TextField('text',\n required=False,\n searchable=True,\n# we got an error with this attribute on Plone 3.3\n# Tried to add 'text___fr___' as primary field but already has the primary field 'text'\n#\n# we need the primary markers to have the getContentType method working\n primary=True,\n storage = AnnotationStorage(migrate=True),\n default_output_type = 'text/x-html-safe',\n widget = widgets.RichWidget(\n description = '',\n label = _(u'label_event_announcement', default=u'Event body text'),\n rows = 25,\n allow_file_upload = zconf.ATDocument.allow_document_upload\n ),\n schemata='default',\n ),\n ]","sub_path":"raptus/multilanguageplone/extender/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"200782150","text":"# Initializing two stacks\nstackin, stackout = [], []\n\nfor i in range(int(input())):\n \n itr = list(map(int, input().split(\" \")))\n itr_opr = itr[0]\n \n\n if itr_opr == 1:\n stackin.append(itr[1])\n\n if itr_opr == 2:\n # If stack_out is not empty then pop from it\n if stackout:\n stackout.pop()\n continue\n else:\n # Move everything to the out stack:\n while stackin:\n stackout.append(stackin.pop())\n stackout.pop()\n continue\n\n if itr_opr == 3:\n if not stackout:\n print(stackin[0])\n else:\n print(stackout[-1])","sub_path":"Queue/Queueusingtwostack.py","file_name":"Queueusingtwostack.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"64611495","text":"# @Time : 2019/6/16 8:32\n# @Author : Xu Huipeng\n# @Blog : https://brycexxx.github.io/\n\nfrom typing import List\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def postorderTraversal(self, root: TreeNode) -> List[int]:\n if not root: return []\n\n res = []\n stack = [root]\n\n while stack:\n if not stack[-1].left and not stack[-1].right:\n cur = stack.pop()\n res.append(cur.val)\n if not stack:\n break\n last = stack[-1]\n if last.right:\n stack.append(last.right)\n last.right = None\n if last.left:\n stack.append(last.left)\n last.left = None\n\n return res\n\n def postorderTraversal1(self, root: TreeNode) -> List[int]:\n res = []\n\n def recur(root: TreeNode) -> None:\n if not root: return\n recur(root.left)\n recur(root.right)\n res.append(root.val)\n\n recur(root)\n return res\n\n # 
Reference: https://leetcode-cn.com/problems/binary-tree-postorder-traversal/solution/er-cha-shu-hou-xu-bian-li-dian-xing-die-dai-fa-by-/\n def postorderTraversal2(self, root: TreeNode) -> List[int]:\n res, stack = [], []\n last, node = None, root\n\n while node or stack:\n while node:\n stack.append(node)\n node = node.left\n cur = stack[-1]\n if not cur.right or last == cur.right:\n stack.pop()\n res.append(cur.val)\n last, node = cur, None\n else:\n node = cur.right\n return res\n\n def postorderTraversal3(self, root: TreeNode) -> List[int]:\n \"\"\"\n A more concise version. The idea is a variation of preorder traversal: preorder is root->left->right and\n postorder is left->right->root, so change the traversal to root->right->left and reverse the result\n to obtain the postorder sequence.\n \"\"\"\n if not root: return []\n\n res, stack = [], [root]\n\n while stack:\n cur = stack.pop()\n res.append(cur.val)\n if cur.left:\n stack.append(cur.left)\n if cur.right:\n stack.append(cur.right)\n\n return res[::-1]\n\n\n\nif __name__ == '__main__':\n root = TreeNode(1)\n root.right = TreeNode(2)\n root.right.left = TreeNode(3)\n\n s = Solution()\n print(s.postorderTraversal(root))\n","sub_path":"postorderTraversal.py","file_name":"postorderTraversal.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"444026528","text":"from urlwatch.filters import GetElementById\n\nfrom nose.tools import eq_\n\n\ndef test_get_element_by_id():\n get_element_by_id = GetElementById(None, None)\n result = get_element_by_id.filter(\"\"\"\n <html>\n <div id=\"foo\">asdf bar</div>\n <div id=\"bar\">asdf bar hoho</div>\n </html>\n \"\"\", 'bar')\n print(result)\n eq_(result, '<div id=\"bar\">asdf bar hoho</div>')\n","sub_path":"test/test_filters.py","file_name":"test_filters.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"256835382","text":"from . import core as ewutils\nfrom . import frontend as fe_utils\nfrom .combat import EwUser\nfrom ..backend import core as bknd_core\nfrom ..backend.district import EwDistrictBase as EwDistrict\nfrom ..backend.market import EwMarket\nfrom ..backend.market import EwStock\nfrom ..backend.player import EwPlayer\nfrom ..static import cfg as ewcfg\nfrom ..static import poi as poi_static\n\n\nasync def post_leaderboards(client = None, server = None):\n\tleaderboard_channel = fe_utils.get_channel(server = server, channel_name = ewcfg.channel_leaderboard)\n\n\tmarket = EwMarket(id_server = server.id)\n\ttime = \"day {}\".format(market.day) \n\n\tawait fe_utils.send_message(client, leaderboard_channel, \"▓▓{} **STATE OF THE CITY:** {} {}▓▓\".format(ewcfg.emote_theeye, time, ewcfg.emote_theeye))\n\n\tkingpins = make_kingpin_board(server = server, title = ewcfg.leaderboard_kingpins)\n\tawait fe_utils.send_message(client, leaderboard_channel, kingpins)\n\tdistricts = make_district_control_board(id_server = server.id, title = ewcfg.leaderboard_districts)\n\tawait fe_utils.send_message(client, leaderboard_channel, districts)\n\ttopslimes = make_userdata_board(server = server, category = ewcfg.col_slimes, title = ewcfg.leaderboard_slimes)\n\tawait fe_utils.send_message(client, leaderboard_channel, topslimes)\n\t#topcoins = make_userdata_board(server = server, category = ewcfg.col_slimecoin, title = ewcfg.leaderboard_slimecoin)\n\tewutils.logMsg(\"starting net worth calc\")\n\ttopcoins = make_stocks_top_board(server = server)\n\tewutils.logMsg(\"finished net worth calc\")\n\tawait fe_utils.send_message(client, leaderboard_channel, topcoins)\n\ttopghosts = make_userdata_board(server = server, category = ewcfg.col_slimes, title = ewcfg.leaderboard_ghosts, lowscores = True, rows = 3)\n\tawait fe_utils.send_message(client, leaderboard_channel, topghosts)\n\ttopbounty = make_userdata_board(server = server, category = ewcfg.col_bounty, title = ewcfg.leaderboard_bounty, divide_by = ewcfg.slimecoin_exchangerate)\n\tawait fe_utils.send_message(client, leaderboard_channel, topbounty)\n\t#topfashion = make_userdata_board(server = server, category = ewcfg.col_freshness, title = ewcfg.leaderboard_fashion)\n\tewutils.logMsg(\"starting freshness calc\")\n\ttopfashion = make_freshness_top_board(server = server)\n\tewutils.logMsg(\"finished freshness calc\")\n\tawait fe_utils.send_message(client, leaderboard_channel, topfashion)\n\ttopdonated = make_userdata_board(server = server, category = ewcfg.col_splattered_slimes, title = ewcfg.leaderboard_donated)\n\tawait fe_utils.send_message(client, leaderboard_channel, topdonated)\n\t#topdegraded = make_userdata_board(server = server, category = ewcfg.col_degradation, title = ewcfg.leaderboard_degradation)\n\t#await ewutils.send_message(client, leaderboard_channel, topdegraded)\n\t#topshamblerkills = make_statdata_board(server = server, category = ewcfg.stat_shamblers_killed, title = ewcfg.leaderboard_shamblers_killed)\n\t#await ewutils.send_message(client, leaderboard_channel, topshamblerkills)\n\ttopslimeoids = make_slimeoids_top_board(server = server)\n\tawait fe_utils.send_message(client, leaderboard_channel, topslimeoids)\n\t#topfestivity = make_slimernalia_board(server = server, title = ewcfg.leaderboard_slimernalia)\n\t#await 
ewutils.send_message(client, leaderboard_channel, topfestivity)\n\ttopzines = make_zines_top_board(server=server)\n\tawait fe_utils.send_message(client, leaderboard_channel, topzines)\n\t#topgambit = make_gambit_leaderboard(server = server, title = ewcfg.leaderboard_gambit_high)\n\t#await ewutils.send_message(client, leaderboard_channel, topgambit)\n\t#bottomgambit = make_gambit_leaderboard(server = server, title = ewcfg.leaderboard_gambit_low)\n\t#await ewutils.send_message(client, leaderboard_channel, bottomgambit)\n\ndef make_stocks_top_board(server = None):\n\tentries = []\n\ttry:\n\t\tplayers_coin = bknd_core.execute_sql_query((\n\t\t\t\"SELECT pl.display_name, u.life_state, u.faction, u.slimecoin, IFNULL(sh_kfc.shares, 0), IFNULL(sh_tb.shares, 0), IFNULL(sh_ph.shares, 0), u.id_user \" +\n\t\t\t\"FROM users AS u \" +\n\t\t\t\"INNER JOIN players AS pl ON u.id_user = pl.id_user \" +\n\t\t\t\"LEFT JOIN shares AS sh_kfc ON sh_kfc.id_user = u.id_user AND sh_kfc.id_server = u.id_server AND sh_kfc.stock = 'kfc' \" +\n\t\t\t\"LEFT JOIN shares AS sh_tb ON sh_tb.id_user = u.id_user AND sh_tb.id_server = u.id_server AND sh_tb.stock = 'tacobell' \" +\n\t\t\t\"LEFT JOIN shares AS sh_ph ON sh_ph.id_user = u.id_user AND sh_ph.id_server = u.id_server AND sh_ph.stock = 'pizzahut' \" +\n\t\t\t\"WHERE u.id_server = %(id_server)s \" +\n\t\t\t\"ORDER BY u.slimecoin DESC\"\n\t\t), {\n\t\t\t\"id_server\" : server.id,\n\t\t})\n\t\t\n\t\tstock_kfc = EwStock(id_server = server.id, stock = 'kfc')\n\t\tstock_tb = EwStock(id_server = server.id, stock = 'tacobell')\n\t\tstock_ph = EwStock(id_server = server.id, stock = 'pizzahut')\n\n\t\tshares_value = lambda shares, stock: round(shares * (stock.exchange_rate / 1000.0))\n\n\t\tnet_worth = lambda u: u[3] + shares_value(u[4], stock_kfc) + shares_value(u[5], stock_tb) + shares_value(u[6], stock_ph)\n\n\n\t\tnw_map = {}\n\t\tfor user in players_coin:\n\t\t\tnw_map[user[-1]] = net_worth(user)\n\n\t\tplayers_coin = sorted(players_coin, key = lambda u: nw_map.get(u[-1]), reverse=True)\n\n\t\tdata = map(lambda u: [u[0], u[1], u[2], nw_map.get(u[-1])], players_coin[:5])\n\n\t\tif data != None:\n\t\t\tfor row in data:\n\t\t\t\tif row != None:\n\t\t\t\t\tentries.append(row)\n\texcept:\n\t\tewutils.logMsg(\"Error occured while fetching stock leaderboard\")\n\n\t\n\treturn format_board(entries = entries, title = ewcfg.leaderboard_slimecoin)\n\ndef make_freshness_top_board(server = None):\n\tentries = []\n\ttry:\n\t\tall_adorned = bknd_core.execute_sql_query(\"SELECT id_item FROM items WHERE id_server = %s \" + \n\t\t\t\"AND id_item IN (SELECT id_item FROM items_prop WHERE name = 'adorned' AND value = 'true')\",\n\t\t\t( server.id, )\n\t\t)\n\n\t\tall_adorned = tuple(map(lambda a : a[0], all_adorned))\n \n\t\tif len(all_adorned) == 0:\n\t\t\treturn format_board(entries = entries, title = ewcfg.leaderboard_fashion)\n\n\t\tall_basefresh = bknd_core.execute_sql_query(\"SELECT id_item, value FROM items_prop WHERE name = 'freshness' \" + \n\t\t\t\"AND id_item IN %s\",\n\t\t\t( all_adorned, )\n\t\t)\n\n\t\tall_users = bknd_core.execute_sql_query(\"SELECT id_item, id_user FROM items WHERE id_item IN %s\", ( all_adorned, ))\n\n\n\t\tfresh_map = {}\n\n\t\tuser_fresh = {}\n\t\tfor row in all_basefresh:\n\t\t\tbasefresh = int(row[1])\n\t\t\tfresh_map[row[0]] = basefresh\n\n\t\tfor row in all_users:\n\t\t\tuser_fresh[row[1]] = 0\n\n\t\tfor row in all_users:\n\t\t\titem_fresh = fresh_map.get(row[0])\n\t\t\tif type(item_fresh) != int:\n\t\t\t\titem_fresh = 0\n\t\t\tuser_fresh[row[1]] += 
item_fresh\n\n\t\tuser_ids = sorted(user_fresh, key=lambda u : user_fresh[u], reverse=True)\n\n\t\t\n\t\ttop_five = []\n\n\t\tcurrent_user = None\n\n\t\tmax_fresh = lambda base : base * 50 + 100\n\n\t\twhile len(user_ids) > 0 and (len(top_five) < 5 or top_five[-1].freshness < max_fresh(user_fresh.get(user_ids[0]))):\n\t\t\tcurrent_user = EwUser(id_user = user_ids.pop(0), id_server = server.id, data_level = 2)\n\n\t\t\ttop_five.append(current_user)\n\n\t\t\ttop_five.sort(key=lambda u : u.freshness, reverse=True)\n\t\t\t\n\t\t\ttop_five = top_five[:5]\n\n\t\t\n\n\t\tdata = []\n\n\t\tfor user in top_five:\n\t\t\tplayer_data = EwPlayer(id_user = user.id_user)\n\n\t\t\tdata.append([player_data.display_name, user.life_state, user.faction, user.freshness])\n\n\t\tif data != None:\n\t\t\tfor row in data:\n\t\t\t\tif row != None:\n\t\t\t\t\tentries.append(row)\n\texcept:\n\t\tewutils.logMsg(\"Error occured while fetching fashion leaderboard\")\n\t\n\treturn format_board(entries = entries, title = ewcfg.leaderboard_fashion)\n\ndef make_slimeoids_top_board(server = None):\n\tboard = \"{mega} ▓▓▓▓▓ TOP SLIMEOIDS (CLOUT) ▓▓▓▓▓ {mega}\\n\".format(\n\t\tmega = \"<:megaslime:436877747240042508>\"\n\t)\n\n\ttry:\n\t\tconn_info = bknd_core.databaseConnect()\n\t\tconn = conn_info.get('conn')\n\t\tcursor = conn.cursor()\n\n\t\tcursor.execute((\n\t\t\t\"SELECT pl.display_name, sl.name, sl.clout \" +\n\t\t\t\"FROM slimeoids AS sl \" +\n\t\t\t\"INNER JOIN players AS pl ON sl.id_user = pl.id_user \" +\n\t\t\t\"WHERE sl.id_server = %s AND sl.life_state = 2 \" +\n\t\t\t\"ORDER BY sl.clout DESC LIMIT 3\"\n\t\t), (\n\t\t\tserver.id,\n\t\t))\n\n\t\tdata = cursor.fetchall()\n\t\tif data != None:\n\t\t\tfor row in data:\n\t\t\t\tboard += \"{} `{:_>3} | {}'s {}`\\n\".format(\n\t\t\t\t\tewcfg.emote_blank,\n\t\t\t\t\trow[2],\n\t\t\t\t\trow[0].replace(\"`\",\"\"),\n\t\t\t\t\trow[1].replace(\"`\",\"\")\n\t\t\t\t)\n\tfinally:\n\t\t# Clean up the database handles.\n\t\tcursor.close()\n\t\tbknd_core.databaseClose(conn_info)\n\n\treturn board\n\ndef make_zines_top_board(server = None):\n\tboard = \"{zine} ▓▓▓▓▓ BESTSELLING ZINES ▓▓▓▓▓ {zine}\\n\".format(\n\t\tzine = \"<:zine:655854388761460748>\"\n\t)\n\n\ttry:\n\t\tconn_info = bknd_core.databaseConnect()\n\t\tconn = conn_info.get('conn')\n\t\tcursor = conn.cursor()\n\n\t\tcursor.execute((\n\t\t\t\"SELECT b.title, b.author, b.sales \" +\n\t\t\t\"FROM books as b \" +\n\t\t\t\"WHERE b.id_server = %s AND b.book_state = 1 \" +\n\t\t\t\"ORDER BY b.sales DESC LIMIT 5\"\n\t\t), (\n\t\t\tserver.id,\n\t\t))\n\n\t\tdata = cursor.fetchall()\n\t\tif data != None:\n\t\t\tfor row in data:\n\t\t\t\tboard += \"{} `{:_>3} | {} by {}`\\n\".format(\n\t\t\t\t\tewcfg.emote_blank,\n\t\t\t\t\trow[2],\n\t\t\t\t\trow[0].replace(\"`\",\"\"),\n\t\t\t\t\trow[1].replace(\"`\",\"\")\n\t\t\t\t)\n\tfinally:\n\t\t# Clean up the database handles.\n\t\tcursor.close()\n\t\tbknd_core.databaseClose(conn_info)\n\n\treturn board\n\ndef make_userdata_board(server = None, category = \"\", title = \"\", lowscores = False, rows = 5, divide_by = 1):\n\tentries = []\n\ttry:\n\t\tconn_info = bknd_core.databaseConnect()\n\t\tconn = conn_info.get('conn')\n\t\tcursor = conn.cursor()\n\n\t\tcursor.execute(\"SELECT {name}, {state}, {faction}, {category} FROM users, players WHERE users.id_server = %s AND users.{id_user} = players.{id_user} AND users.{state} != {state_kingpin} ORDER BY {category} {order} LIMIT {limit}\".format(\n\t\t\tname = ewcfg.col_display_name,\n\t\t\tstate = ewcfg.col_life_state,\n\t\t\tfaction = 
ewcfg.col_faction,\n\t\t\tcategory = category,\n\t\t\tid_user = ewcfg.col_id_user,\n\t\t\tstate_kingpin = ewcfg.life_state_kingpin,\n\t\t\torder = ('DESC' if lowscores == False else 'ASC'),\n\t\t\tlimit = rows\n\t\t), (\n\t\t\tserver.id, \n\t\t))\n\t\ti = 0\n\t\trow = cursor.fetchone()\n\t\twhile (row != None) and (i < rows):\n\t\t\tif row[1] == ewcfg.life_state_kingpin or row[1] == ewcfg.life_state_grandfoe or row[1] == ewcfg.life_state_lucky:\n\t\t\t\trow = cursor.fetchone()\n\t\t\telse:\n\t\t\t\tentries.append(row)\n\t\t\t\trow = cursor.fetchone()\n\t\t\t\ti += 1\n\n\tfinally:\n\t\t# Clean up the database handles.\n\t\tcursor.close()\n\t\tbknd_core.databaseClose(conn_info)\n\n\treturn format_board(entries = entries, title = title, divide_by = divide_by)\n\ndef make_statdata_board(server = None, category = \"\", title = \"\", lowscores = False, rows = 5, divide_by = 1):\n\tentries = []\n\ttry:\n\t\tconn_info = bknd_core.databaseConnect()\n\t\tconn = conn_info.get('conn')\n\t\tcursor = conn.cursor()\n\n\t\tcursor.execute(\"SELECT {name}, {state}, {faction}, stats.{category_value} FROM users, players, stats WHERE users.id_server = %s AND users.{id_user} = players.{id_user} AND stats.id_server = users.id_server AND stats.{id_user} = users.{id_user} AND stats.{category_name} = %s ORDER BY stats.{category_value} {order} LIMIT {limit}\".format(\n\t\t\tname = ewcfg.col_display_name,\n\t\t\tstate = ewcfg.col_life_state,\n\t\t\tfaction = ewcfg.col_faction,\n\t\t\tcategory_name = ewcfg.col_stat_metric,\n\t\t\tcategory_value = ewcfg.col_stat_value,\n\t\t\tid_user = ewcfg.col_id_user,\n\t\t\torder = ('DESC' if lowscores == False else 'ASC'),\n\t\t\tlimit = rows\n\t\t), (\n\t\t\tserver.id, \n\t\t\tcategory\n\t\t))\n\n\t\ti = 0\n\t\trow = cursor.fetchone()\n\t\twhile (row != None) and (i < rows):\n\t\t\tif row[1] == ewcfg.life_state_kingpin or row[1] == ewcfg.life_state_grandfoe or row[1] == ewcfg.life_state_lucky:\n\t\t\t\trow = cursor.fetchone()\n\t\t\telse:\n\t\t\t\tentries.append(row)\n\t\t\t\trow = cursor.fetchone()\n\t\t\t\ti += 1\n\n\tfinally:\n\t\t# Clean up the database handles.\n\t\tcursor.close()\n\t\tbknd_core.databaseClose(conn_info)\n\n\treturn format_board(entries = entries, title = title, divide_by = divide_by)\ndef make_kingpin_board(server = None, title = \"\"):\n\tentries = []\n\ttry:\n\t\tconn_info = bknd_core.databaseConnect()\n\t\tconn = conn_info.get('conn')\n\t\tcursor = conn.cursor()\n\n\t\tcursor.execute(\"SELECT {name}, {state}, {faction}, {category} FROM users, players WHERE users.id_server = %s AND {state} = %s AND users.{id_user} = players.{id_user} ORDER BY {category} DESC\".format(\n\t\t\tname = ewcfg.col_display_name,\n\t\t\tstate = ewcfg.col_life_state,\n\t\t\tfaction = ewcfg.col_faction,\n\t\t\tcategory = ewcfg.col_slimes,\n\t\t\tid_user = ewcfg.col_id_user\n\t\t), (\n\t\t\tserver.id, \n\t\t\tewcfg.life_state_kingpin\n\t\t))\n\n\t\trows = cursor.fetchall()\n\t\tfor row in rows:\n\t\t\tentries.append(row)\n\n\tfinally:\n\t\t# Clean up the database handles.\n\t\tcursor.close()\n\t\tbknd_core.databaseClose(conn_info)\n\n\treturn format_board(entries = entries, title = title)\n\n\ndef make_district_control_board(id_server, title):\n\tentries = []\n\t\n\tdistricts = []\n\tfor poi in poi_static.poi_list:\n\t\tif poi.is_district:\n\t\t\tdistricts.append(poi.id_poi)\n\t\t\t\n\trowdy_districts = 0\n\tkiller_districts = 0\n\n\tfor district in districts:\n\t\tdistrict_data = EwDistrict(district = district, id_server = id_server)\n\t\tif district_data.controlling_faction == 
ewcfg.faction_rowdys:\n\t\t\trowdy_districts += 1\n\t\telif district_data.controlling_faction == ewcfg.faction_killers:\n\t\t\tkiller_districts += 1\n\n\trowdy_entry = [ewcfg.faction_rowdys.capitalize(), rowdy_districts]\n\tkiller_entry = [ewcfg.faction_killers.capitalize(), killer_districts]\n\n\treturn format_board(\n\t\tentries = [rowdy_entry, killer_entry] if rowdy_districts > killer_districts else [killer_entry, rowdy_entry],\n\t\ttitle = title,\n\t\tentry_type = ewcfg.entry_type_districts\n\t)\n\n#SLIMERNALIA\ndef make_slimernalia_board(server, title):\n\tentries = []\n\tdata = bknd_core.execute_sql_query(\n\t\t\"SELECT {display_name}, {state}, {faction}, FLOOR({festivity}) + COALESCE(sigillaria, 0) + FLOOR({festivity_from_slimecoin}) as total_festivity FROM users \"\\\n\t\t\"LEFT JOIN (SELECT id_user, COUNT(*) * 1000 as sigillaria FROM items INNER JOIN items_prop ON items.{id_item} = items_prop.{id_item} WHERE {name} = %s AND {value} = %s GROUP BY items.{id_user}) f on users.{id_user} = f.{id_user}, players \"\\\n\t\t\"WHERE users.{id_server} = %s AND users.{id_user} = players.{id_user} ORDER BY total_festivity DESC LIMIT 5\".format(\n\t\t\tid_user = ewcfg.col_id_user,\n\t\t\tid_server = ewcfg.col_id_server,\n\t\t\tid_item = ewcfg.col_id_item,\n\t\t\tfestivity = ewcfg.col_festivity,\n\t\t\tfestivity_from_slimecoin = ewcfg.col_festivity_from_slimecoin,\n\t\t\tname = ewcfg.col_name,\n\t\t\tdisplay_name = ewcfg.col_display_name,\n\t\t\tvalue = ewcfg.col_value,\n\t\t\tstate = ewcfg.col_life_state,\n\t\t\tfaction = ewcfg.col_faction\n\t\t), (\n\t\t\t\"id_furniture\",\n\t\t\tewcfg.item_id_sigillaria,\n\t\t\tserver.id\n\t\t)\n\t)\n\n\tfor row in data:\n\t\tentries.append(row)\n\t\n\treturn format_board(entries = entries, title = title)\n\n#SWILLDERMUK\ndef make_gambit_leaderboard(server, title, rows = 3):\n\tentries = []\n\t\n\tlowgambit = False\n\tif title == ewcfg.leaderboard_gambit_high:\n\t\tlowgambit = False\n\telse:\n\t\tlowgambit = True\n\t\n\ttry:\n\t\tconn_info = bknd_core.databaseConnect()\n\t\tconn = conn_info.get('conn')\n\t\tcursor = conn.cursor()\n\t\n\t\tcursor.execute(\n\t\t\t\"SELECT {name}, {state}, {faction}, {gambit} FROM users, players WHERE users.id_server = %s AND users.{id_user} = players.{id_user} ORDER BY {gambit} {order} LIMIT {limit}\".format(\n\t\t\t\tname=ewcfg.col_display_name,\n\t\t\t\tgambit=ewcfg.col_gambit,\n\t\t\t\tstate=ewcfg.col_life_state,\n\t\t\t\tfaction=ewcfg.col_faction,\n\t\t\t\tid_user=ewcfg.col_id_user,\n\t\t\t\torder=('DESC' if lowgambit == False else 'ASC'),\n\t\t\t\tlimit=rows\n\t\t\t), (\n\t\t\t\tserver.id,\n\t\t\t))\n\t\n\t\ti = 0\n\t\trow = cursor.fetchone()\n\t\twhile (row != None) and (i < rows):\n\t\t\tif row[1] == ewcfg.life_state_kingpin or row[1] == ewcfg.life_state_grandfoe or row[1] == ewcfg.life_state_lucky:\n\t\t\t\trow = cursor.fetchone()\n\t\t\telse:\n\t\t\t\tentries.append(row)\n\t\t\t\trow = cursor.fetchone()\n\t\t\t\ti += 1\n\n\tfinally:\n\t\t# Clean up the database handles.\n\t\tcursor.close()\n\t\tbknd_core.databaseClose(conn_info)\n\n\treturn format_board(entries=entries, title=title)\n\n\"\"\"\n\tconvert leaderboard data into a message ready string \n\"\"\"\ndef format_board(entries = None, title = \"\", entry_type = \"player\", divide_by = 1):\n\tresult = \"\"\n\tresult += board_header(title)\n\n\tfor entry in entries:\n\t\tresult += board_entry(entry, entry_type, divide_by)\n\n\treturn result\n\ndef board_header(title):\n\temote = None\n\temote2 = None\n\n\tbar = \" ▓▓▓▓▓\"\n\n\tif title == 
ewcfg.leaderboard_slimes:\n\t\temote = ewcfg.emote_slime2\n\t\tbar += \"▓▓▓ \"\n\n\telif title == ewcfg.leaderboard_slimecoin:\n\t\temote = ewcfg.emote_slimecoin\n\t\tbar += \" \"\n\n\telif title == ewcfg.leaderboard_ghosts:\n\t\temote = ewcfg.emote_negaslime\n\t\tbar += \"▓ \"\n\n\telif title == ewcfg.leaderboard_bounty:\n\t\temote = ewcfg.emote_slimegun\n\t\tbar += \"▓ \"\n\n\telif title == ewcfg.leaderboard_kingpins:\n\t\temote = ewcfg.emote_theeye\n\t\tbar += \" \"\n\n\telif title == ewcfg.leaderboard_districts:\n\t\temote = ewcfg.emote_nlacakanm\n\t\tbar += \" \"\n\n\telif title == ewcfg.leaderboard_donated:\n\t\temote = ewcfg.emote_slimecorp\n\t\tbar += \" \"\n\t\n\telif title == ewcfg.leaderboard_slimernalia:\n\t\temote = ewcfg.emote_slimeheart\n\t\tbar += \" \"\n\n\telif title == ewcfg.leaderboard_degradation:\n\t\temote = ewcfg.emote_slimeskull\n\t\tbar += \" \"\n\n\telif title == ewcfg.leaderboard_shamblers_killed:\n\t\temote = ewcfg.emote_slimeshot\n\t\tbar += \" \"\n\t\n\telif title == ewcfg.leaderboard_gambit_high:\n\t\temote = ewcfg.emote_janus1\n\t\temote2 = ewcfg.emote_janus2\n\t\tbar += \" \"\n\t\n\telif title == ewcfg.leaderboard_gambit_low:\n\t\temote = ewcfg.emote_janus1\n\t\temote2 = ewcfg.emote_janus2\n\t\tbar += \" \"\n\n\telif title == ewcfg.leaderboard_fashion:\n\t\temote = ewcfg.emote_111\n\t\tbar += \" \"\n\t\n\tif emote == None and emote2 == None:\n\t\tbar += \"▓▓\"\n\t\treturn bar + title + bar + \"\\n\"\n\tif emote2 != None:\n\t\treturn emote + bar + title + bar + emote2 + \"\\n\"\n\telse:\n\t\treturn emote + bar + title + bar + emote + \"\\n\"\n\ndef board_entry(entry, entry_type, divide_by):\n\tresult = \"\"\n\n\tif entry_type == ewcfg.entry_type_player:\n\t\tfaction = ewutils.get_faction(life_state = entry[1], faction = entry[2])\n\t\tfaction_symbol = ewutils.get_faction_symbol(faction, entry[2])\n\n\t\tnumber = int(entry[3] / divide_by)\n\n\t\tif number > 999999999:\n\t\t\tnum_str = \"{:.3e}\".format(number)\n\t\telse:\n\t\t\tnum_str = \"{:,}\".format(number)\n\n\t\tresult = \"{} `{:_>15} | {}`\\n\".format(\n\t\t\tfaction_symbol,\n\t\t\tnum_str,\n\t\t\tentry[0].replace(\"`\",\"\")\n\t\t)\n\n\telif entry_type == ewcfg.entry_type_districts:\n\t\tfaction = entry[0]\n\t\tdistricts = entry[1]\n\t\tfaction_symbol = ewutils.get_faction_symbol(faction.lower())\n\n\t\tresult = \"{} `{:_>15} | {}`\\n\".format(\n\t\t\tfaction_symbol,\n\t\t\tfaction,\n\t\t\tdistricts\n\t\t)\n\n\treturn result\n","sub_path":"ew/utils/leaderboard.py","file_name":"leaderboard.py","file_ext":"py","file_size_in_byte":18321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"282030766","text":"from anthill.framework.conf import settings\nfrom anthill.framework.core.exceptions import ImproperlyConfigured\nfrom anthill.framework.core.mail.asynchronous import send_mail\nfrom anthill.framework.utils.translation import translate as _\nfrom anthill.framework.handlers.socketio import SocketIOHandler\nfrom anthill.platform.handlers import UserHandlerMixin\nfrom anthill.platform.core.messenger.handlers.client_watchers import MessengerClientsWatcher\nfrom anthill.platform.core.messenger.client.exceptions import ClientError\nfrom anthill.platform.core.messenger.moderators import ModeratedException, moderate_message\nfrom tornado import template\nfrom typing import Optional\nimport user_agents\nimport socketio\nimport logging\n\n\nlogger = logging.getLogger('anthill.application')\n\n\nclass MessengerNamespace(socketio.AsyncNamespace):\n groups = 
['__messenger__'] # Global groups. Must starts with `__` for security reason\n direct_group_prefix = '__direct' # Must starts with `__`\n client_class = None\n is_notify_on_net_status_changed = True\n secure_direct = True\n secure_groups = True\n email_on_incoming_message = True\n clients = MessengerClientsWatcher(user_limit=0)\n\n ONLINE = 'online'\n OFFLINE = 'offline'\n\n def create_client(self, user=None):\n if self.client_class is None:\n raise ImproperlyConfigured('Client class is undefined')\n return self.client_class(user=user)\n\n async def get_client(self, sid):\n session = await self.get_session(sid)\n return session['client']\n\n async def get_request_handler(self, sid):\n session = await self.get_session(sid)\n return session['request_handler']\n\n async def send_net_status(self, sid, status: str) -> None:\n allowed = [self.ONLINE, self.OFFLINE]\n if status not in allowed:\n raise ValueError('Status must be in %s' % allowed)\n method = getattr(self, 'on_' + status)\n await method(sid)\n\n async def build_direct_group_with(self, user_id: str, sid, reverse: bool = False) -> str:\n client = await self.get_client(sid)\n items = [self.direct_group_prefix]\n if reverse:\n items += [user_id, client.get_user_id()]\n else:\n items += [client.get_user_id(), user_id]\n return '.'.join(items)\n\n async def get_groups(self, sid) -> list:\n client = await self.get_client(sid)\n groups = self.groups or []\n groups += await client.get_groups() or []\n\n # For testing purposes\n if 'test' not in groups and settings.DEBUG:\n groups.append('test')\n\n # Personal group\n personal_group = client.create_personal_group()\n if personal_group not in groups:\n groups.append(personal_group)\n\n return groups\n\n def get_participants(self, group: str):\n return self.server.manager.get_participants(self.namespace, room=group)\n\n def enter_groups(self, sid, groups) -> None:\n for group in groups:\n self.enter_room(sid, group)\n\n def leave_groups(self, sid, groups) -> None:\n for group in groups:\n self.leave_room(sid, group)\n\n # noinspection PyMethodMayBeStatic\n def retrieve_group(self, data):\n group = data.get('group')\n trusted = data.get('trusted', False)\n if not trusted:\n if group.startswith('__'): # System group\n raise ValueError('Not valid group name: %s' % group)\n return group\n\n async def online(self, sid, user_id):\n \"\"\"Check if user online.\"\"\"\n client = await self.get_client(sid)\n group = client.create_personal_group(user_id)\n return bool(next(self.get_participants(group), None))\n\n async def on_connect(self, sid, environ):\n request_handler = environ['tornado.handler']\n session = await self.get_session(sid)\n\n current_user = request_handler.current_user\n client = self.create_client(user=current_user)\n await client.authenticate()\n\n session['client'] = client\n session['request_handler'] = request_handler\n\n self.enter_groups(sid, await self.get_groups(sid))\n\n if self.is_notify_on_net_status_changed:\n await self.send_net_status(sid, self.ONLINE)\n\n async def on_disconnect(self, sid):\n if self.is_notify_on_net_status_changed:\n await self.send_net_status(sid, self.OFFLINE)\n self.leave_groups(sid, self.rooms(sid))\n\n async def on_message(self, sid, data):\n pass\n\n # Supported messages client can send\n\n # Client actions\n\n # GROUPS\n\n async def on_create_group(self, sid, data):\n client = await self.get_client(sid)\n personal_group = client.create_personal_group()\n group_name = data.get('name')\n group_data = data.get('data')\n content = {\n 'user': {\n 'id': 
client.get_user_id()\n }\n }\n try:\n await client.create_group(group_name, group_data)\n except ClientError as e:\n content['error'] = str(e)\n await self.emit('create_group', data=content, room=personal_group)\n else:\n for sid_ in self.get_participants(personal_group):\n self.enter_room(sid_, group_name)\n await self.emit('create_group', data=content, room=group_name)\n\n async def on_delete_group(self, sid, data):\n client = await self.get_client(sid)\n group = self.retrieve_group(data)\n content = {\n 'user': {\n 'id': client.get_user_id()\n }\n }\n try:\n await client.delete_group(group)\n except ClientError as e:\n content['error'] = str(e)\n personal_group = client.create_personal_group()\n await self.emit('delete_group', data=content, room=personal_group)\n else:\n await self.emit('delete_group', data=content, room=group)\n await self.close_room(room=group)\n\n async def on_update_group(self, sid, data):\n client = await self.get_client(sid)\n group = self.retrieve_group(data)\n\n async def on_join_group(self, sid, data):\n client = await self.get_client(sid)\n group = self.retrieve_group(data)\n personal_group = client.create_personal_group()\n content = {\n 'user': {\n 'id': client.get_user_id()\n }\n }\n try:\n await client.join_group(group)\n except ClientError as e:\n content['error'] = str(e)\n await self.emit('join_group', data=content, room=personal_group)\n else:\n for sid_ in self.get_participants(personal_group):\n self.enter_room(sid_, group)\n await self.emit('join_group', data=content, room=group)\n\n async def on_leave_group(self, sid, data):\n client = await self.get_client(sid)\n group = self.retrieve_group(data)\n personal_group = client.create_personal_group()\n content = {\n 'user': {\n 'id': client.get_user_id()\n }\n }\n try:\n await client.leave_group(group)\n except ClientError as e:\n content['error'] = str(e)\n await self.emit('leave_group', data=content, room=personal_group)\n else:\n for sid_ in self.get_participants(personal_group):\n self.leave_room(sid_, group)\n await self.emit('leave_group', data=content, room=group)\n\n # /GROUPS\n\n # MESSAGES\n\n async def send_email_on_incoming_message(self, data, group, my_client):\n participants = self.get_participants(group)\n clients = [await self.get_client(s) for s in participants]\n clients.remove(my_client)\n recipient_list = (c.user.email for c in clients)\n loader = template.Loader(settings.TEMPLATE_PATH)\n subject = _('New incoming message')\n message = loader.load(\"incoming_message_email.txt\").generate(**data)\n html_message = loader.load(\"incoming_message_email.html\").generate(**data)\n from_email = settings.DEFAULT_FROM_EMAIL\n await send_mail(\n subject, message, from_email, recipient_list,\n fail_silently=False, html_message=html_message)\n\n async def on_create_message(self, sid, data):\n content_type = data.get('content_type', 'text/plain')\n group = self.retrieve_group(data)\n text = data.get('data')\n event_id = data.get('event_id')\n client = await self.get_client(sid)\n content = {\n 'user': {\n 'id': client.get_user_id()\n },\n 'content_type': content_type,\n 'event_id': event_id\n }\n\n # Moderation\n try:\n await moderate_message(text)\n except ModeratedException as e:\n content['error'] = '\\n'.join(e.messages)\n personal_group = client.create_personal_group()\n await self.emit('create_message', data=content, room=personal_group)\n return 'ERR', event_id, '\\n'.join(e.messages)\n # /Moderation\n\n try:\n message_kwargs = {'data': data, 'content_type': content_type}\n message_id = 
await client.create_message(group, message_kwargs)\n except ClientError as e:\n content['error'] = str(e)\n personal_group = client.create_personal_group()\n await self.emit('create_message', data=content, room=personal_group)\n return 'ERR', event_id, str(e)\n else:\n content['payload'] = {'id': message_id, 'data': data}\n await self.emit('create_message', data=content, room=group)\n if self.email_on_incoming_message:\n await self.send_email_on_incoming_message(data, group, client)\n return 'OK', event_id, message_id\n\n async def on_enumerate_group(self, sid, data):\n client = await self.get_client(sid)\n\n async def on_get_messages(self, sid, data):\n client = await self.get_client(sid)\n\n async def on_delete_messages(self, sid, data):\n client = await self.get_client(sid)\n\n async def on_update_messages(self, sid, data):\n client = await self.get_client(sid)\n\n async def on_read_messages(self, sid, data):\n client = await self.get_client(sid)\n\n # /MESSAGES\n\n # /Client actions\n\n # System actions\n\n async def on_typing_started(self, sid, data):\n \"\"\"Typing text message started.\"\"\"\n client = await self.get_client(sid)\n group = self.retrieve_group(data)\n content = {\n 'user': {\n 'id': client.get_user_id()\n }\n }\n await self.emit('typing_started', data=content, room=group, skip_sid=sid)\n\n async def on_typing_stopped(self, sid, data):\n \"\"\"Typing text message stopped.\"\"\"\n client = await self.get_client(sid)\n group = self.retrieve_group(data)\n content = {\n 'user': {\n 'id': client.get_user_id()\n }\n }\n await self.emit('typing_stopped', data=content, room=group, skip_sid=sid)\n\n async def on_sending_file_started(self, sid, data):\n \"\"\"Sending file started.\"\"\"\n client = await self.get_client(sid)\n group = self.retrieve_group(data)\n event_id = data.get('event_id')\n content = {\n 'user': {\n 'id': client.get_user_id()\n },\n 'content_type': None,\n 'event_id': event_id,\n 'preview': None\n }\n await self.emit('sending_file_started', data=content, room=group, skip_sid=sid)\n\n async def on_sending_file_stopped(self, sid, data):\n \"\"\"Sending file stopped.\"\"\"\n client = await self.get_client(sid)\n group = self.retrieve_group(data)\n event_id = data.get('event_id')\n content = {\n 'user': {\n 'id': client.get_user_id()\n },\n 'event_id': event_id\n }\n await self.emit('sending_file_stopped', data=content, room=group, skip_sid=sid)\n\n async def on_online(self, sid):\n request_handler = await self.get_request_handler(sid)\n user_agent = user_agents.parse(\n request_handler.request.headers.get('User-Agent'))\n client = await self.get_client(sid)\n content = {\n 'user': {\n 'id': client.get_user_id()\n },\n 'device': {\n 'family': user_agent.device.family,\n 'brand': user_agent.device.brand,\n 'model': user_agent.device.model\n },\n 'os': {\n 'family': user_agent.os.family,\n 'version': user_agent.os.version_string\n }\n }\n for group in self.rooms(sid):\n await self.emit(self.ONLINE, data=content, room=group, skip_sid=sid)\n\n async def on_offline(self, sid):\n client = await self.get_client(sid)\n user_id = client.get_user_id()\n content = {\n 'user': {\n 'id': user_id\n }\n }\n is_online = await self.online(sid, user_id)\n if not is_online:\n for group in self.rooms(sid):\n await self.emit(self.OFFLINE, data=content, room=group, skip_sid=sid)\n\n # /System actions\n\n\nclass MessengerHandler(SocketIOHandler, UserHandlerMixin):\n def check_origin(self, origin):\n return True\n # TODO: configuration from settings.py\n # return 
super().check_origin(origin)\n","sub_path":"core/messenger/handlers/transports/socketio.py","file_name":"socketio.py","file_ext":"py","file_size_in_byte":13580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"36231314","text":"import random\r\nimport time\r\n\r\ninventory = []\r\ninventorysize = 10\r\nmoney = 0\r\nvalidActions = [\"grow\", \"sell\", \"bank\", \"inventory\"]\r\n\r\ndef grow():\r\n\tprogress = 0\r\n\twaittime = random.randrange(1, 11) / 10\r\n\tstates = 7\r\n\r\n\tprint(\"Growing wheat\")\r\n\tfor i in range(states+1):\r\n\t\ttime.sleep(waittime)\r\n\t\tprogress += 1\r\n\r\n\tprint(\"Your wheat has succesfully growed\")\r\n\tcollect()\r\n\r\ndef collect():\r\n\tif inventorysize <= 0:\r\n\t\tprint(\"No empty slot\")\r\n\t\tprint(\"Crop scrapped\")\r\n\telse:\r\n\t\tprint(\"Crop collected\")\r\n\t\tinventory.append(\"wheat\")\r\n\r\ndef sell():\r\n\tif \"wheat\" in inventory:\r\n\t\tprint(\"Your wheat has sold for $10\")\r\n\t\treturn money + 10\r\n\telse:\r\n\t\tprint(\"No wheat is in your inventory\")\r\n\t\tprint(\"Please grow some and sell some more\")\r\n\r\ndef bank():\r\n\tprint(\"You have\", \"$\" + str(money))\r\n\r\n\r\ndef invsee():\r\n\tif len(inventory) > 0:\r\n\t\tprint(\"Your inventory contains:\", str(len(inventory)) + \"x wheat\")\r\n\telse:\r\n\t\tprint(\"Your inventory is empty\")\r\n\tprint(\"Inventory slots:\", inventorysize-len(inventory))\r\n\r\n#Main program\r\nwhile True:\r\n\tprint(\"\\nValid actions:\", \", \".join(validActions))\r\n\tplayerAction = input(\">>\")\r\n\r\n\tif playerAction == \"end\":\r\n\t\tbreak\r\n\telif playerAction == \"grow\":\r\n\t\tgrow()\r\n\telif playerAction == \"sell\":\r\n\t\tmoney = sell()\r\n\telif playerAction == \"bank\":\r\n\t\tbank()\r\n\telif playerAction == \"inventory\":\r\n\t\tinvsee()\r\n","sub_path":"farmGame.py","file_name":"farmGame.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"58965187","text":"#!/usr/bin/env python\n__doc__ = \"\"\"\n\nAsynchronous Sampler.\n\nNicholas Turner , 2017\nKisuk Lee , 2016-2017\n\"\"\"\n\nimport os\nimport h5py\n\nfrom Queue import Queue\nfrom threading import Thread\n\n\ndef sampler_daemon(sampler, q):\n \"\"\" Function run by the thread. \"\"\"\n while True:\n sample = sampler(imgs=[\"input\"])\n q.put(sample, block=True, timeout=None)\n\n\nclass AsyncSampler(object):\n \"\"\"\n Wrapper class for asynchronous sampling functions.\n \"\"\"\n\n def __init__(self, sampler, workers=1, queue_size=40):\n self.q = Queue(queue_size)\n self.t = list()\n for w in range(workers):\n t = Thread(target=sampler_daemon, args=(sampler, self.q))\n t.daemon = True\n t.start()\n self.t.append(t)\n\n def get(self):\n \"\"\"Pulls a sample from the queue.\"\"\"\n sample = self.q.get(block=True, timeout=None)\n self.q.task_done()\n return sample\n","sub_path":"src/utils/async_sampler.py","file_name":"async_sampler.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"422781227","text":"\"\"\"\nAllows to track the request_id in the logs, the DB and others. 
Adds a c2c_request_id attribute\nto the Pyramid Request class to access it.\n\"\"\"\nimport logging\nimport urllib.parse\nimport uuid\nfrom typing import List, Any, Optional, Dict, Sequence # noqa # pylint: disable=unused-import\n\nimport pyramid.request\nimport requests.adapters\nimport requests.models\nfrom pyramid.threadlocal import get_current_request\n\nfrom c2cwsgiutils import _utils, stats\n\nID_HEADERS: List[str] = []\n_HTTPAdapter_send = requests.adapters.HTTPAdapter.send\nLOG = logging.getLogger(__name__)\nDEFAULT_TIMEOUT: Optional[float] = None\n\n\ndef _gen_request_id(request: pyramid.request.Request) -> str:\n for id_header in ID_HEADERS:\n if id_header in request.headers:\n return request.headers[id_header] # type: ignore\n return str(uuid.uuid4())\n\n\ndef _patch_requests() -> None:\n def send_wrapper(\n self: requests.adapters.HTTPAdapter,\n request: requests.models.PreparedRequest,\n timeout: Optional[float] = None,\n **kwargs: Any,\n ) -> requests.Response:\n pyramid_request = get_current_request()\n header = ID_HEADERS[0]\n if pyramid_request is not None and header not in request.headers:\n request.headers[header] = pyramid_request.c2c_request_id\n\n if timeout is None:\n if DEFAULT_TIMEOUT is not None:\n timeout = DEFAULT_TIMEOUT\n else:\n LOG.warning(\"Doing a %s request without timeout to %s\", request.method, request.url)\n\n status = 999\n timer = stats.timer()\n try:\n response = _HTTPAdapter_send(self, request, timeout=timeout, **kwargs)\n status = response.status_code\n return response\n finally:\n if request.url is not None:\n parsed = urllib.parse.urlparse(request.url)\n port = parsed.port or (80 if parsed.scheme == \"http\" else 443)\n if stats.USE_TAGS:\n key: Sequence[Any] = [\"requests\"]\n tags: Optional[Dict[str, Any]] = dict(\n scheme=parsed.scheme,\n host=parsed.hostname,\n port=port,\n method=request.method,\n status=status,\n )\n else:\n key = [\"requests\", parsed.scheme, parsed.hostname, port, request.method, status]\n tags = None\n timer.stop(key, tags)\n\n requests.adapters.HTTPAdapter.send = send_wrapper # type: ignore\n\n\ndef init(config: Optional[pyramid.config.Configurator] = None) -> None:\n global ID_HEADERS, DEFAULT_TIMEOUT\n ID_HEADERS = [\"X-Request-ID\", \"X-Correlation-ID\", \"Request-ID\", \"X-Varnish\", \"X-Amzn-Trace-Id\"]\n if config is not None:\n extra_header = _utils.env_or_config(config, \"C2C_REQUEST_ID_HEADER\", \"c2c.request_id_header\")\n if extra_header is not None:\n ID_HEADERS.insert(0, extra_header)\n config.add_request_method(_gen_request_id, \"c2c_request_id\", reify=True)\n\n DEFAULT_TIMEOUT = _utils.env_or_config(\n config, \"C2C_REQUESTS_DEFAULT_TIMEOUT\", \"c2c.requests_default_timeout\", type_=float\n )\n _patch_requests()\n\n if _utils.env_or_config(config, \"C2C_SQL_REQUEST_ID\", \"c2c.sql_request_id\", False):\n from . 
import _sql\n\n _sql.init()\n","sub_path":"c2cwsgiutils/request_tracking/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"121528277","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 09 13:50:13 2015\n\n@author: heiligenstein\n\"\"\"\n\nimport numpy\nimport random\n\n\n\ndef sigmoid(z):\n return 1 / (1 + numpy.exp(-z))\n\ndef normalize(input_layer):\n return (input_layer - mean_input)/std_input\n \ndef c0(array):\n return list(array).count(0)\n\ndef MAE(pred_ys, true_ys):\n return sum(numpy.abs(true_ys - pred_ys)) / len(pred_ys)\n\ndef MSE(pred_ys, true_ys):\n return sum((true_ys - pred_ys)**2) / len(pred_ys)\n\n\n# for randomly scattered 0s\ndef ran_z(input_array, pc):\n \"\"\"Imputes randomly scattered 0s.\"\"\"\n masked = numpy.array(input_array)\n rand = numpy.random.binomial(size=len(input_array), n=1, p=float(pc)/100)\n for i in xrange(len(rand)):\n if rand[i] == 1:\n masked[i] = 0\n return masked\n \n# for consecutive 0s\ndef ran_c(input_array, pc, len_outage):\n \"\"\"Randomly imputes consecutive 0s.\"\"\"\n # temperature is recorded every 10 mins, so 1 hr = 6 values.\n # 0 mapped to 1 value of data array \n if len_outage == 0:\n len_outage = 1\n else:\n len_outage = len_outage*6\n impc = numpy.array(input_array)\n o_0 = list(input_array).count(0)\n length = len(input_array)\n num_zeros = int(length * float(pc) / 100) - o_0\n num_outs = int(num_zeros/len_outage)\n remaining_zeros = (num_zeros)%(len_outage)\n for i in range(num_outs):\n rand = numpy.random.randint(0, high=(length - len_outage))\n j = len_outage\n while j != 0:\n if impc[rand%length] != 0:\n impc[rand%length] = 0\n j -= 1\n rand += 1\n if remaining_zeros != 0:\n rand2 = numpy.random.randint(0, high=(length - remaining_zeros))\n j2 = remaining_zeros\n while j2 != 0:\n if impc[rand2%length] != 0:\n impc[rand2%length] = 0\n j2 -= 1\n rand2 +=1\n return impc\n \n \ndef ran_nan(input_array, pc, len_outage):\n \"\"\"Randomly imputes consecutive NaNs.\"\"\"\n # temperature is recorded every 10 mins, so 1 hr = 6 values.\n # 0 mapped to 1 value of data array \n if len_outage == 0:\n len_outage = 1\n else:\n len_outage = len_outage*6\n impc = numpy.array(input_array)\n o_0 = list(input_array).count(0)\n length = len(input_array)\n num_zeros = int(length * float(pc) / 100) - o_0\n num_outs = int(num_zeros/len_outage)\n remaining_zeros = (num_zeros)%(len_outage)\n for i in range(num_outs):\n rand = numpy.random.randint(0, high=(length - len_outage))\n j = len_outage\n while j != 0:\n if not numpy.isnan(impc[rand%length]):\n impc[rand%length] = numpy.nan\n j -= 1\n rand += 1\n if remaining_zeros != 0:\n rand2 = numpy.random.randint(0, high=(length - remaining_zeros))\n j2 = remaining_zeros\n while j2 != 0:\n if not numpy.isnan(impc[rand2%length]):\n impc[rand2%length] = numpy.nan\n j2 -= 1\n rand2 +=1\n return impc\n\n\n########################\n# ENERGY FORMULAS #\n########################\n\n \ndef energy_vh(vlayer, vbias, hlayer, hbias, W):\n ''' Function to compute the energy of joint configuration (v, h)'''\n # e = -sumVisibleBias - sumHiddenBias - sumVisibleWeightHidden\n sum_vb = numpy.dot(vlayer, vbias)\n sum_hb = numpy.dot(hlayer, hbias)\n vWh = numpy.dot(numpy.dot(vlayer, W), hlayer)\n E = -sum_vb - sum_hb - vWh\n return E\n \ndef energy_xyh(vlayer, vbias, hlayer, hbias, clayer, cbias, W, U):\n \"\"\"Function to compute the energy of visible vector given h and y\"\"\"\n # E(x, y, h) = 
-sumHiddenWeightVisible - sumvbiasVisible\n # - sumhbiasHidden - sumcbiasClass - sumHiddenUClass\n vWh = numpy.dot(numpy.dot(vlayer, W), hlayer)\n sum_vb = numpy.dot(vlayer, vbias)\n sum_hb = numpy.dot(hlayer, hbias)\n sum_cb = numpy.dot(clayer, cbias)\n hUc = numpy.dot(numpy.dot(clayer, U), hlayer)\n E = - vWh - sum_vb - sum_hb - sum_cb - hUc\n return E\n \n\ndef fe_v(vlayer, vbias, hbias, W):\n ''' Function to compute free energy of visible vector v'''\n # f_e = -sumVisibleBias - sumLog(1 + exp(sumVisibleWeight + hbias))\n sum_vb = numpy.dot(vlayer, vbias)\n vW = numpy.dot(vlayer, W)\n sumLog = sum(numpy.log(1 + numpy.exp(vW + hbias)))\n FE = -sum_vb - sumLog\n return FE\n \ndef fe_v1(vlayer, vbias, hlayerprobs, hbias, W): # works\n '''Free energy, expected energy minus the entropy'''\n #f(v) = -sumVisibleBias - sumProbH(sumVisibleWeight + hbias) + entropy\n sum_vb = numpy.dot(vlayer,vbias)\n xj = numpy.dot(vlayer,W) + hbias\n B = numpy.dot(hlayerprobs,xj)\n H = sum((hlayerprobs*numpy.log(hlayerprobs)) + \n ((1 - hlayerprobs)*numpy.log(1 - hlayerprobs)))\n FE1 = - sum_vb - B + H\n return FE1 \n\ndef fe_v2(vlayer, vbias, hbias, W): # works\n '''Free energy, expected energy minus the entropy'''\n #f(v) = -sumVisibleBias - sumProbH(sumVisibleWeight + hbias) + entropy\n sum_vb = numpy.dot(vlayer,vbias)\n xj = numpy.dot(vlayer,W) + hbias\n hlayerprobs = sigmoid(numpy.dot(vlayer,W) + hbias)\n B = numpy.dot(hlayerprobs,xj)\n H = sum((hlayerprobs*numpy.log(hlayerprobs)) + \n ((1 - hlayerprobs)*numpy.log(1 - hlayerprobs)))\n FE2 = - sum_vb - B + H\n return FE2\n\n\ndef fe_v3(vlayer, vbias, hbias, W): # works\n '''Elfwing formula for free energy, expected energy minus the entropy.\n Uses hlayers instead of hbias'''\n #f(v) = -sumVisibleBias - sumProbH(sumVisibleWeight + hbias) + entropy\n hlayerprobs = sigmoid(numpy.dot(vlayer,W) + hbias)\n vW = numpy.dot(vlayer, W)\n vWhp = numpy.dot(vW, hlayerprobs)\n sum_vb = numpy.dot(vlayer, vbias)\n sum_hhp = numpy.dot(hbias, hlayerprobs)\n H = sum((hlayerprobs*numpy.log(hlayerprobs)) + \n ((1 - hlayerprobs)*numpy.log(1 - hlayerprobs)))\n FE3 = -vWhp - sum_vb - sum_hhp + H\n return FE3\n \ndef fe_gbv(vlayer, vbias, hbias, W):\n '''Schmah function to compute free energy for GB values'''\n # f(gbv) = - sumLog(1 + exp(sumVisibleWeight + hbias)) + 1/2sum(Visible - vbias)^2\n vb = numpy.sum((vlayer - vbias)**2) / 2\n vW = numpy.dot(vlayer, W)\n sumLog = numpy.sum(numpy.log(1 + numpy.exp(vW + hbias)))\n FE_gbv = - sumLog + vb\n return FE_gbv\n \ndef fe_gbv_enrique(vlayer, vbias, hbias, W): \n wx_b = numpy.dot(vlayer, W) + hbias\n vbias_term = 0.5 * numpy.dot((vlayer - vbias), (vlayer - vbias).T)\n hidden_term = numpy.sum(numpy.log(1 + numpy.exp(wx_b)))\n return -hidden_term - vbias_term\n\ndef fe_vc(vlayer, hbias, W, clayer, cbias, U):\n ''' Function to compute free energy of visible vector v and class c'''\n # f_e = -sumClassBias - sumLog(1 + exp(sumVisibleWeight + hbias + sumClassWeight))\n sum_cb = numpy.dot(clayer, cbias)\n vWbcU = (numpy.dot(vlayer, W)) + hbias + (numpy.dot(clayer, U))\n sumLog = sum(numpy.log(1 + numpy.exp(vWbcU)))\n F_vc = -sum_cb - sumLog\n return F_vc\n\n\ndef exp_nrg(vlayer, vbias, hbias, W):\n A = numpy.dot(vlayer,vbias)\n xj = numpy.dot(vlayer,W) + hbias\n hlayerprobs = sigmoid(numpy.dot(vlayer,W) + hbias)\n B = numpy.dot(hlayerprobs,xj)\n EE = - A - B\n return -EE\n\n\ndef prob_h_given_v(vlayer, W, hbias):\n '''Function to compute conditional probability of hidden given visible'''\n # probhv = product of sigmoid(sumVisibleWeight + 
hbias)\n probs = sigmoid(numpy.dot(vlayer,W) + hbias)\n return reduce(lambda x, y: x*y, probs)\n \n\ndef prob_v_given_h(hlayer, W, vbias):\n '''Function to compute conditional probability of visible given hidden'''\n # probvh = product of sigmoid(sumHiddenWeight + vbias)\n probs = sigmoid(numpy.dot(hlayer,numpy.transpose(W)) + vbias)\n return reduce(lambda x, y: x*y, probs)\n\n","sub_path":"nrg.py","file_name":"nrg.py","file_ext":"py","file_size_in_byte":7753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"146123347","text":"import sys\nimport numpy as np\nfrom numpy import log \nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.stats import poisson, nbinom\nimport math\nfrom scipy.spatial.distance import cosine\nfrom scipy.optimize import minimize\nfrom scipy.special import gamma, psi, gammaln\n\nimport matplotlib\n#matplotlib.use('Agg')\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport pdb\n\ndef cost_nb(X,Lam, a):\n return np.sum((X + a) * np.log(Lam + a) - X * np.log(Lam))\n\ndef cost_nb_full(X, Lam, a):\n ll = gammaln(X + a) - gammaln(a)+ X * log(Lam/(Lam + a)) + a * log(a/(a+ Lam))\n return -np.sum(ll)\n\ndef cost_poiss(X, Lam, e = 0):\n return np.sum(Lam - X * np.log(Lam + e))\n\ndef cost_nb_full_m(X, Lam, a):\n ll = gammaln(X + a) - gammaln(a)+ X * log(Lam/(Lam + a)) + a * log(a/(a+ Lam))\n return -ll\n\ndef cost_poiss_m(X, Lam, e = 0):\n return Lam - X * np.log(Lam + e)\n \ndef holdout(counts, seed = 123):\n np.random.seed(seed)\n Y = np.random.binomial(counts,0.5)\n Yhat = counts - Y\n return Y, Yhat\n \ndef plot_pval(counts, Lam, seed = 123, title = \"\", outfile= \"\", save = True):\n np.random.seed(seed)\n (n,p) = counts.shape\n C = np.random.uniform(size=(n,p))\n pval = C * poisson.cdf(counts-1, Lam) + (1-C) * poisson.cdf(counts, Lam)\n plt.hist(pval.flatten(), bins = np.linspace(0,1,100))\n plt.title(title)\n if save:\n plt.savefig(outfile)\n plt.close()\n\ndef plot_pval_nb(counts, Lam, a, seed = 123, title = \"\", outfile= \"\", save = True):\n np.random.seed(seed)\n (n,p) = counts.shape\n probs = Lam/(a + Lam)\n C = np.random.uniform(size=(n,p))\n pval = C * nbinom.cdf(counts-1, a, 1-probs) + (1-C) * nbinom.cdf(counts, a, 1-probs)\n plt.hist(pval.flatten(), bins = np.linspace(0,1,100))\n plt.title(title)\n if save:\n plt.savefig(outfile)\n # else:\n # plt.show()\n plt.close()\n\ndef get_pvalue_pois(X, Lam,seed = 123):\n np.random.seed(seed)\n (n,p) = X.shape\n C = np.random.uniform(size=(n,p))\n pval = C * poisson.cdf(X-1, Lam) + (1-C) * poisson.cdf(X, Lam)\n return pval\n\n\ndef get_pvalue_nb(X, Lam, a, seed = 123):\n np.random.seed(seed)\n (n,p) = X.shape\n probs = Lam/(a + Lam)\n C = np.random.uniform(size=(n,p))\n pval = C * nbinom.cdf(X-1, a, 1-probs) + (1-C) * nbinom.cdf(X, a, 1-probs)\n return pval\n\ndef pvalue2binary(pvalm, val_range = [0.25,0.4]):\n out = ((pvalm > val_range[0]) & (pvalm < val_range[1])).astype(int)\n return out\n\n\n\ndef get_resid(X, Lam, e = 0):\n # (obs - exp) / sqrt(exp)\n return (X - Lam)/np.sqrt(Lam + e)\n\n\ndef plot_pval_vs_counts(counts, log, Lam, seed = 123, title = \"\", outfile= \"\", save = True):\n np.random.seed(seed)\n (n,p) = counts.shape\n C = np.random.uniform(size=(n,p))\n pval = C * poisson.cdf(counts-1, Lam) + (1-C) * poisson.cdf(counts, Lam)\n X = np.log10(counts + 1) if log else counts\n xlabel = \"log10(counts + 1)\" if log else \"counts\"\n plt.scatter(X.flatten(),pval.flatten())\n plt.xlabel(xlabel)\n plt.title(title)\n if save:\n plt.savefig(outfile)\n else:\n plt.show()\n
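 # The p-values built above are randomized PIT values: with C ~ Uniform(0,1),\n # C*F(x-1) + (1-C)*F(x) is exactly Uniform(0,1) when the counts follow the\n # fitted model, so a flat histogram / structureless scatter signals a good\n # fit. A minimal usage sketch (assuming a fitted rate matrix Lam_hat with\n # the same shape as counts; both names are hypothetical here):\n # plot_pval_vs_counts(counts, True, Lam_hat, title=\"fit check\", save=False)\n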
plt.close()\n\ndef plot_pval_nb_vs_counts(counts,log, Lam, a, seed = 123, title = \"\", outfile= \"\", save = True):\n np.random.seed(seed)\n (n,p) = counts.shape\n probs = Lam/(a + Lam)\n C = np.random.uniform(size=(n,p))\n pval = C * nbinom.cdf(counts-1, a, 1-probs) + (1-C) * nbinom.cdf(counts, a, 1-probs)\n X = np.log10(counts + 1) if log else counts\n xlabel = \"log10(counts + 1)\" if log else \"counts\"\n plt.scatter(X.flatten(),pval.flatten())\n plt.xlabel(xlabel)\n plt.title(title)\n if save:\n plt.savefig(outfile)\n else:\n plt.show()\n plt.close()\n\n\ndef plot_heatmap_count(counts, log = True, main = \"\",outfile = None, save = True):\n #pdb.set_trace()\n Z = np.log10(counts + 1) if log else counts\n title = \"n_genes vs n_sample (log(x + 1))\" if log else \"n_genes vs n_sample\"\n title = \"{}: {}\".format(main, title)\n (n,p) = counts.shape\n XB = np.linspace(0,p-1,p)\n YB = np.linspace(0,n-1,n)\n X,Y = np.meshgrid(XB,YB)\n\n plt.imshow(Z,interpolation='none',aspect='auto')\n plt.colorbar()\n plt.title(title)\n if save:\n plt.savefig(outfile)\n # else:\n # plt.show()\n plt.close()\n\ndef plot_hist_count(counts, outfile):\n pdf_pages = PdfPages(outfile)\n fig = plt.figure()\n plt.hist(np.log10(counts+1).flatten(), bins = 100)\n plt.xlabel(\"log10(count +1)\")\n plt.title(\"hist of count\")\n pdf_pages.savefig(fig)\n plt.close(fig)\n\n fig = plt.figure()\n try: \n plt.hist(np.log10(counts.mean(axis = 0)), bins = 100)\n except:\n plt.hist(np.log10((counts).mean(axis = 0) + 1e-16), bins = 100)\n plt.xlabel(\"log10(count average)\")\n plt.title(\"hist of average count across genes\")\n pdf_pages.savefig(fig)\n plt.close(fig)\n\n fig = plt.figure()\n try:\n plt.hist(np.log10(counts.mean(axis = 1)), bins = 100)\n except:\n plt.hist(np.log10((counts).mean(axis = 1) + 1e-16), bins = 100)\n plt.xlabel(\"log10(count average)\")\n plt.title(\"hist of average count across samples\")\n pdf_pages.savefig(fig)\n pdf_pages.close()\n plt.close(fig)\n\n\n# def poisson2multinom (F, L):\n# Lnew = L * np.sum(F,0)\n# s = np.sum(Lnew,1)\n# Lnew = (Lnew.T / s).T\n# Fnew = F / np.sum(F,0)\n# return Fnew, Lnew\n\ndef poisson2multinom(F,L):\n col_F = F.sum(axis = 0)\n F_ = F/col_F.reshape(1, -1)\n L_ = L * col_F.reshape(1, -1)\n L_ /= L_.sum(axis = 1).reshape(-1,1)\n return F_, L_\n\ndef multinom2poisson(X, L, F):\n L = np.diag(X.sum(axis = 1)) @ L\n return L, F\n\n## Y_trans_{ij} = log(Y_ij/alpha + 1)\ndef log_transform(counts, alpha = 1, pseudo = False):\n if pseudo:\n return (np.exp(counts) - 1)*alpha\n else:\n return np.log(counts/alpha + 1)\n\n# X is (n_sample, n_feature) count matrix\n# s = sum_j X_ij (s_i is the sum for sample i)\n# y_ij = ln( x_ij / (s_i/median(s)) + 1) (edgeR uses mean instead of median)\n## not sure if it makes sense to transform back in the same way \n\ndef log_transform_bysample(counts):\n s = counts.sum(axis = 1)\n alpha = s/np.median(s)\n return np.log(counts/alpha[:,None] + 1)\n\ndef log_transform_bysample_back(Lam, counts):\n s = counts.sum(axis = 1)\n alpha = s/np.median(s)\n return (np.exp(Lam) - 1)*alpha[:,None]\n\n\n# ## L Ft = L Df Df^{-1} Ft = (L Df) Ft_hat\n# ## L_hat = normalize(L Df, byrow); F_hat = Ft_hat.T\n\n# def poisson2multinom(L,F):\n# #Lam = L @ F.T\n \n# Ft = F.T.copy()\n# del F\n# rowsum_Ft = Ft.sum(axis = 1)\n# Ft_hat = Ft/rowsum_Ft[:,None]\n# L_hat = L.dot(np.diag(rowsum_Ft))\n# #Dl = L_hat.sum(axis = 1)\n# L_hat = L_hat/L_hat.sum(axis = 1)[:,None]\n \n# #Lam_ = np.diag(Dl) @ L_hat @ Ft_hat\n# #print(np.isclose(Lam, Lam_).all())\n# return L_hat, 
Ft_hat.T\n \ndef align_topics(F1,F2):\n ind = []\n for k1 in range(F1.shape[1]):\n best = (-1,-math.inf)\n for k2 in range(F2.shape[1]):\n similarity = 1 - cosine(F1[:,k1], F2[:,k2])\n if similarity > best[1]:\n best = (k2, similarity)\n ind.append(best[0])\n return ind\n\n\n# def negll_nb(a, args):\n# X, Lam = args[0], args[1]\n\n# def negll_nb(a, X, Lam):\n# if a < 1e-06:\n# return -1e+16\n# ll = log(gamma(X + a)) - log(gamma(a)) + a*log(a) - (X + a)*log(Lam + a)\n# ll = ll.sum()\n# return -ll\n\ndef negll_nb(a, args):\n X, Lam = args[0], args[1]\n if a < 1e-06:\n return 1e+16 # heavy penalty: the negative log-likelihood must be large (bad) for tiny a\n ll = gammaln(X + a) - gammaln(a) + a*log(a) - (X + a)*log(Lam + a)\n ll = ll.sum()\n return -ll\n\ndef negll_nb_der(a, args):\n X, Lam = args[0], args[1]\n out = psi(a + X) - psi(a) - log(Lam + a) - (X + a)/(Lam + a) + log(a) + 1\n return - np.sum(out)\n\ndef estimate_a_nb(X, Lam, a0, xtol = 1e-6, maxiter = 50, disp = True):\n args = [X, Lam]\n res = minimize(negll_nb, a0, method='Newton-CG', jac=negll_nb_der, args = (args),\n options={'xtol': xtol, 'disp': disp, 'maxiter':maxiter})\n return res.x[0]\n\n\ndef sim_nb_nmf(n,p,k,a, seed = 123):\n np.random.seed(seed)\n L = np.exp(np.random.uniform(size = (n,k)))\n F = np.exp(np.random.uniform(size = (p,k)))\n Lam = L @ F.T\n probs = Lam/(a + Lam)\n X = np.random.negative_binomial(n = a, p = 1-probs, size = (n,p))\n return X, Lam, L, F\n","sub_path":"code/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":8348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"51371477","text":"from galaxy.util.resources import resource_path\nfrom galaxy_test.base import api_asserts\nfrom ..base.api import ShedApiTestCase\n\nCOLUMN_MAKER_PATH = resource_path(__package__, \"../test_data/column_maker/column_maker.tar\")\n\n\nclass TestShedRepositoriesApi(ShedApiTestCase):\n def test_create(self):\n populator = self.populator\n category_id = populator.new_category(prefix=\"testcreate\").id\n\n response = self.api_interactor.get(f\"categories/{category_id}/repositories\")\n api_asserts.assert_status_code_is_ok(response)\n repos = response.json()[\"repositories\"]\n assert len(repos) == 0\n\n populator.new_repository(category_id)\n response = self.api_interactor.get(f\"categories/{category_id}/repositories\")\n api_asserts.assert_status_code_is_ok(response)\n repos = response.json()[\"repositories\"]\n assert len(repos) == 1\n\n def test_update_repository(self):\n populator = self.populator\n prefix = \"testupdate\"\n category_id = populator.new_category(prefix=prefix).id\n repository = populator.new_repository(category_id, prefix=prefix)\n repository_id = repository.id\n repository_update = populator.upload_revision(\n repository_id,\n COLUMN_MAKER_PATH,\n )\n assert repository_update.is_ok\n\n # used by getRepository in TS client.\n def test_metadata_simple(self):\n populator = self.populator\n repository = populator.setup_column_maker_repo(prefix=\"repoformetadata\")\n repository_metadata = populator.get_metadata(repository)\n metadata_for_revisions = repository_metadata.__root__\n assert len(metadata_for_revisions) == 1\n only_key = list(metadata_for_revisions.keys())[0]\n assert only_key.startswith(\"0:\")\n only_revision = list(metadata_for_revisions.values())[0]\n assert only_revision\n assert only_revision.downloadable\n assert not only_revision.malicious\n\n def test_index_simple(self):\n populator = self.populator\n repo = 
populator.setup_column_maker_repo(prefix=\"repoforindex\")\n repository_id = repo.id\n show_response = self.api_interactor.get(f\"repositories/{repository_id}\")\n index_response = self.api_interactor.get(\"repositories\")\n api_asserts.assert_status_code_is_ok(show_response)\n api_asserts.assert_status_code_is_ok(index_response)\n repository_ids = [r[\"id\"] for r in index_response.json()]\n assert repository_id in repository_ids\n\n repository = self.populator.get_repository_for(repo.owner, repo.name)\n assert repository.owner == repo.owner\n assert repository.name == repo.name\n\n def test_get_ordered_installable_revisions(self):\n # Used in ephemeris...\n populator = self.populator\n repository = populator.setup_column_maker_repo(prefix=\"repoforindex\")\n assert repository.owner\n assert repository.name\n revisions = populator.get_ordered_installable_revisions(repository.owner, repository.name)\n assert len(revisions.__root__) == 1\n\n def test_reset_on_repository(self):\n populator = self.populator\n repository = populator.setup_column_maker_repo(prefix=\"repoforreseta\")\n assert repository.owner\n assert repository.name\n revisions = populator.get_ordered_installable_revisions(repository.owner, repository.name)\n assert len(revisions.__root__) == 1\n metadata_response = populator.reset_metadata(repository)\n assert metadata_response.start_time\n assert metadata_response.stop_time\n assert metadata_response.status == \"ok\"\n assert len(metadata_response.repository_status) == 1\n revisions = populator.get_ordered_installable_revisions(repository.owner, repository.name)\n assert len(revisions.__root__) == 1\n\n def test_repository_search(self):\n populator = self.populator\n repository = populator.setup_column_maker_repo(prefix=\"repoforreposearch\")\n populator.reindex()\n results = populator.repo_search_query(\"repoforreposearch\")\n assert len(results.hits) == 1\n first_hit = results.hits[0]\n assert first_hit.repository.name == repository.name\n assert first_hit.repository.times_downloaded == 0\n","sub_path":"lib/tool_shed/test/functional/test_shed_repositories.py","file_name":"test_shed_repositories.py","file_ext":"py","file_size_in_byte":4314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"354952453","text":"#=====================================================================\n\n# Chan Hyun (Charles) Park [cpark1@macalester.edu]\n# September 2014\n# Zappos Software Engineering Intern position\n# Step 1 - Challenge\n\n# Must use Python 2.7.6!!! (In order to access Global Module)\n# https://docs.python.org/2/py-modindex.html\n\n# API key: 52ddafbe3ee659bad97fcce7c53592916a6bfd73\n# Documentation: http://developer.zappos.com/docs/api-documentation/\n\n#=====================================================================\n\n\nimport urllib2, json, yaml, time, math\nfrom pprint import pprint\n\n\ndef main():\n\tnumberOfProducts = int(input(\"How many products do you want? \"))\n\tdesiredMaxValue = int(input(\"How much are you willing to spend? 
(w/o $ sign) \")\n\ttry:\n\t\tlistPriceGenerator = listOfPrices(numberOfProducts, desiredMaxValue)\n\t\t#print(x)\n\n\t\tpricesOfItems = approximation(numberOfProducts,desiredMaxValue,listPriceGenerator)\n\t\t#print(pricesOfItems)\n\n\t\tretrieveProduct(pricesOfItems)\n\t\t#products = retrieveProduct(pricesOfItems)\n\t\t#print(products)\n\n\texcept:\n\t\tprint(\"You do not have enough money to buy %s products!\" % numberOfProducts)\n\n\ndef listOfPrices(numberOfProducts, desiredMaxValue):\n\t#A = time.time()\n\treq = urllib2.Request(url='http://api.zappos.com/Search?term=&facets=[%22price%22]&facetSort=name&excludes=[%22results%22]&key=52ddafbe3ee659bad97fcce7c53592916a6bfd73')\n\tf = urllib2.urlopen(req)\n\tfacet = yaml.load(f.read())\n\tprice = facet['facets'][0]['values']\n\tlistOfPrices = []\n\tn = 0\n\tdifPrices = len(price)\n\twhile(n < difPrices):\n\t\t# assumed parsing: each facet value's 'name' is treated as a plain numeric price\n\t\tlistOfPrices.append(float(price[n]['name']))\n\t\tn += 1\n\tlistOfPrices.sort()\n\treturn listOfPrices\n\n\ndef approximation(numberOfProducts, desiredMaxValue, listOfPrices):\n\t\"\"\"Finds numberOfProducts consecutive prices whose sum is as close to\n\tdesiredMaxValue as possible without exceeding it.\n\t\"\"\"\n\tn = len(listOfPrices)\n\t# assumed: 'middle' indexes the price closest to the per-product budget\n\tmiddle = min(range(n), key=lambda i: abs(listOfPrices[i] - float(desiredMaxValue)/numberOfProducts))\n\tmaxApproximation = 0\n\titems = []\n\tif((middle <= (n-1) - numberOfProducts) and (middle >= numberOfProducts)):\n\t\tfor i in range(middle-numberOfProducts,middle+2):\n\t\t\tapproximation = 0\n\t\t\tfor j in range(numberOfProducts):\n\t\t\t\tapproximation += listOfPrices[i+j]\n\t\t\tif approximation > maxApproximation and approximation <= desiredMaxValue:\n\t\t\t\tmaxApproximation = approximation\n\t\t\t\titems = [listOfPrices[k] for k in range(i,i+numberOfProducts)]\n\n\telif(middle > (n-1) - numberOfProducts): #fixes when middle + numberOfProducts is > len(listOfPrices) AKA sad face\n\t\tfor i in range(middle-numberOfProducts,n-numberOfProducts+1):\n\t\t\tapproximation = 0\n\t\t\tfor j in range(numberOfProducts):\n\t\t\t\tapproximation += listOfPrices[i+j]\n\t\t\tif approximation > maxApproximation and approximation <= desiredMaxValue:\n\t\t\t\tmaxApproximation = approximation\n\t\t\t\titems = [listOfPrices[k] for k in range(i,i+numberOfProducts)]\n\n\telse: #if(middle < numberOfProducts): #fixes when middle - numberOfProducts is < 0 AKA super sad face\n\t\tfor i in range(middle+2):\n\t\t\tapproximation = 0\n\t\t\tfor j in range(numberOfProducts):\n\t\t\t\tapproximation += listOfPrices[i+j]\n\t\t\tif approximation > maxApproximation and approximation <= desiredMaxValue:\n\t\t\t\tmaxApproximation = approximation\n\t\t\t\titems = [listOfPrices[k] for k in range(i,i+numberOfProducts)]\n\n\treturn maxApproximation, items\n\n\ndef retrieveProduct(pricesOfItems):\n\t\"\"\"This function takes the prices of items and retrieves the corresponding \n\tproduct information. 
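\tExample with a hypothetical input shaped like approximation()'s return value:\n\tretrieveProduct((150.0, [50.0, 50.0, 50.0])) issues one Search request per\n\tdistinct price and prints the matching products plus \"Total cost: $150.0\".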
\n\t\"\"\"\n\n\tprices = pricesOfItems[1]\n\tcountOccurrences = [[x, prices.count(x)] for x in set(prices)]\n\n\tproducts = []\n\tfor i in range(len(countOccurrences)):\n\t\turlConcat = \"http://api.zappos.com/Search?term=&filters={%22price%22:[\\\"\" + str(countOccurrences[i][0]) + \"\\\"]}&limit=\" + str(countOccurrences[i][1]) + \"&key=52ddafbe3ee659bad97fcce7c53592916a6bfd73\"\n\t\treq = urllib2.Request(url=urlConcat)\n\t\tf = urllib2.urlopen(req)\n\t\tfacet = json.loads(f.read())\n\t\tpprint(facet['results'])\n\t\tproducts.append(facet['results'])\n\tprint(\"Total cost: $\" + str(pricesOfItems[0]))\n\treturn products\n\n\nmain()\n\n\n","sub_path":"Challenge.py","file_name":"Challenge.py","file_ext":"py","file_size_in_byte":5550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"36625380","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def sortedListToBST(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: TreeNode\n \"\"\"\n def findMid(head):\n slow=fast=head\n prev=None\n while fast and fast.next:\n prev=slow\n slow=slow.next\n fast=fast.next.next\n prev.next=None\n return slow\n if not head:\n return None\n if not head.next:\n return TreeNode(head.val)\n mid=findMid(head)\n root=TreeNode(mid.val)\n root.left=self.sortedListToBST(head)\n root.right=self.sortedListToBST(mid.next)\n \n return root\n","sub_path":"Algorithms/Linked List/109_Convert Sorted List to Binary Search Tree.py","file_name":"109_Convert Sorted List to Binary Search Tree.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"107786912","text":"\"\"\" \nTranslationparty module.\n\"\"\"\n \nfrom .util.decorators import command\nimport requests\nfrom .util.data import www_headers as headers\nfrom threading import Thread\n \n@command('translationparty')\ndef translationparty(bot, nick, target, chan, arg):\n if not arg:\n return bot.msg(chan, \"Usage: translationparty - \")\n args = arg.split()\n langarg = args[0].split('-')\n langpair = [langarg[0]+\"|\"+langarg[1], langarg[1]+\"|\"+langarg[0]]\n try:\n iters = int(args[1])\n except ValueError:\n return bot.msg(chan, \"Usage: translationparty - \")\n if iters > 10:\n return bot.msg(chan, \"Maximum iterations is 10!\")\n strings = [' '.join(args[2:])]\n tpThread(bot, target, chan, langpair, iters, strings).start()\n\nclass tpThread(Thread):\n def __init__(self, bot, target, chan, langpair, iters, strings):\n Thread.__init__(self)\n self.bot = bot\n self.target = target\n self.chan = chan\n self.langpair = langpair\n self.iters = iters\n self.strings = strings\n\n def run(self):\n url = \"http://api.mymemory.translated.net/get\"\n for i in range(self.iters*2):\n params = {'q': self.strings[i-1], 'langpair': self.langpair[i%2], 'de': 'sam@tehsvk.net'}\n response = requests.get(url, headers=headers, params=params)\n data = response.json()\n if 'INVALID TARGET LANGUAGE' in data['responseData']['translatedText']:\n return self.bot.msg(self.chan, \"%s: Invalid target language.\" % (self.target))\n self.strings.append(data['responseData']['translatedText'])\n self.bot.msg(self.chan, \"%s: %d - %s\" % (self.target, i, 
self.strings[i-1]))\n","sub_path":"plugins/translationparty.py","file_name":"translationparty.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"10668516","text":"from extender import *\nfrom rnd import Random\n\n\nclass Documentary:\n def __init__(self):\n self.name = \"\"\n self.year = 0\n self.duration = 0\n\n def read_str_array(self, strArray, i):\n # должно быт как минимум три непрочитанных значения в массиве\n if i >= len(strArray) - 2:\n return 0\n self.name = str(strArray[i])\n self.year = int(strArray[i+1])\n self.duration = int(strArray[i+2])\n i += 3\n return i\n\n def print(self):\n print(\"Documentary: name = \", self.name, \"year = \", self.year, \"duration = \", self.duration,\n \"task = \", self.task())\n\n def write(self, ostream):\n ostream.write(\"Documentary: name = {} year = {} duration = {}, task = {}\".\n format(self.name, self.year, self.duration, self.task()))\n\n def random_print(self):\n r = Random()\n self.name = r.generate_random_string(10)\n self.year = r.generate_random_int(1900, 2022)\n self.duration = r.generate_random_int(30, 300)\n print(\"Documentary: name = \", self.name, \"year = \", self.year, \"duration = \", self.duration,\n \"task = \", self.task())\n\n def random_write(self, ostream):\n ostream.write(\"Documentary: name = {} year = {} duration = {}, task = {}\".\n format(self.name, self.year, self.duration, self.task()))\n\n def task(self):\n ans = 0.0\n ans = self.year / len(self.name)\n return ans\n","sub_path":"AVS/documentary.py","file_name":"documentary.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"17718631","text":"\n\ndef make_string(seq):\n \"\"\"\n Don't throw an exception when given an out of range character.\n \"\"\"\n string = ''\n for c in seq:\n # Screen out non-printing characters\n if 32 <= c and c < 256:\n string += chr(c)\n # If no printing chars\n if not string:\n return str(seq)\n return string\n\n\ndef make_string_uc(seq):\n \"\"\"\n Special version to deal with the code in the first 8 bytes of a user comment.\n First 8 bytes gives coding system e.g. ASCII vs. 
JIS vs Unicode.\n \"\"\"\n #code = seq[0:8]\n seq = seq[8:]\n # Of course, this is only correct if ASCII, and the standard explicitly\n # allows JIS and Unicode.\n return make_string( make_string(seq) )\n","sub_path":"exifread/tags/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"147410102","text":"import operator\nimport time\nimport datetime\nimport os\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport numpy as np\nimport pandas as pd\n\nimport p2funcs as p2f\nfrom scipy import interp\n\nfrom sklearn.model_selection import StratifiedKFold, cross_val_score, KFold, cross_val_predict, cross_validate, train_test_split\nfrom sklearn.metrics import accuracy_score, roc_curve, auc\nfrom sklearn.preprocessing import scale, normalize\nfrom sklearn.decomposition import KernelPCA, PCA\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.metrics.pairwise import laplacian_kernel, chi2_kernel\n\n\n#To show runtime of script\nStartTime = time.time()\n\n# Time to differentiate images\nnow = datetime.datetime.now()\nnowdate = now.strftime(\"%Y-%m-%d\")\nnowtime = now.strftime(\"%H-%M\")\n\n# Name of script to trace where images came from\nscriptname = 'modeltest5'\n\n#List of toy datasets to test\n#dataset_list = ['022', '023', '024']\ndataset_list = ['022']\n#dataset_list = ['mesa']\n\n#provide values of gamma to test for all kernel methods\nt1_gamma_list = [2e-7, 0.000002, 0.00002, 0.0002, 0.002, 0.02, 0.2, 2.0]\n\n# Optimal gamma decided by 2-tiered grid search\n\n# Collect optimal gamma from each dataset\nopt_t1_gammas = []\nopt_t2_gammas = []\n\n#Lists of datasets and target arrays \ndatalist = []\n\nfor dataset in dataset_list: \n\n #Create directory if directory does not exist\n filepath = '../../figs/out/%s/%s/%s/' % (scriptname, nowdate, dataset)\n \n if not os.path.exists(filepath):\n os.makedirs(filepath)\n \n #Import data and target\n if dataset == 'mesa':\n X = pd.read_csv('../../data/mesa/MESA_CPMG_MBINV2_ManuallyBinnedData_BatchCorrected_LogTransformed_Data.csv', sep=',')\n \n X = p2f.filt_imp(X, 0.1)\n \n y = np.load('../../data/mesa/mesatarget.npy')\n else:\n X = pd.read_csv('../../data/simulated/mvnsim/mvnsim' + dataset + '.csv', sep=',', header=0, index_col=0)\n y = np.load('../../data/simulated/mvnsim/target' + dataset + '.npy')\n datalist.append((dataset, X, y))\n \n #Scale initial data to centre \n X_scaled = scale(X)\n \n #Declare list for t1 mean mean area under ROC curve (mma) values \n t1_mmas = []\n \n # Create initial variables to update\n max_t1_mma = 0\n opt_t1_gamma = 0\n \n print('\\n### TIER 1 GRID SEARCH ###')\n \n for gamma in t1_gamma_list:\n \n t1_auc_mat, t1_kpcas, t1_models = p2f.m_test5(X_scaled, y, gamma, dataset, filepath, 'tier1')\n \n t1_mmas.append(t1_auc_mat.mean())\n \n # Select optimal t1 gamma \n for i in range(len(t1_mmas)):\n if t1_mmas[i] > max_t1_mma:\n max_t1_mma = t1_mmas[i]\n opt_t1_gamma = t1_gamma_list[i]\n \n # Show optimal gamma\n #print('Optimal Tier 1 gamma for dataset %s found to be %s' %(dataset, opt_t1_gamma))\n opt_t1_gammas.append(opt_t1_gamma)\n\n# End of dataset run \nprint('\\n###################################################################')\n\n# Print aggregate gamma values\nfor i in range(len(opt_t1_gammas)):\n print('\\nOptimal Tier 1 gamma for dataset %s found to be %s' %(dataset_list[i], opt_t1_gammas[i])) \n\n# Count number of each gamma \nt1_gcount_dict = dict((x,opt_t1_gammas.count(x)) for x in set(opt_t1_gammas))\n\n# Find most frequent gamma value\nt1_gamma_consensus = max(t1_gcount_dict, key=t1_gcount_dict.get)\n\n#Just in case last value selected:\nif t1_gamma_consensus == t1_gamma_list[-1]:\n t1_gamma_consensus = t1_gamma_list[-2]\n\n# Create tier 2 gamma list\ngamma_i_t1 = t1_gamma_list.index(t1_gamma_consensus)\nt2_gamma_list = list(p2f.frange(t1_gamma_list[gamma_i_t1], t1_gamma_list[gamma_i_t1+1], t1_gamma_list[gamma_i_t1]))\n\n\n \nfor dataset, X, y in datalist:\n \n #Scale initial data to centre \n X_scaled = scale(X)\n \n #Declare list for t2 mean mean area under ROC curve (mma) values \n t2_mmas = []\n \n # Create initial variables to update\n max_t2_mma = 0\n opt_t2_gamma = 0\n \n print('\\n### TIER 2 GRID SEARCH ###')\n \n for gamma in t2_gamma_list:\n \n # note: reuses the filepath set for this dataset in the tier-1 loop\n t2_auc_mat, t2_kpcas, t2_models = p2f.m_test5(X_scaled, y, gamma, dataset, filepath, 'tier2')\n \n t2_mmas.append(t2_auc_mat.mean())\n \n # Select optimal t2 gamma \n for i in range(len(t2_mmas)):\n if t2_mmas[i] > max_t2_mma:\n max_t2_mma = t2_mmas[i]\n opt_t2_gamma = t2_gamma_list[i]\n \n # Record optimal gamma\n opt_t2_gammas.append(opt_t2_gamma)\n\n# End of dataset run \nprint('\\n###################################################################\\n')\n\n# Print aggregate gamma values\nfor i in range(len(opt_t2_gammas)):\n print('\\nOptimal Tier 2 gamma for dataset %s found to be %s' % (dataset_list[i], opt_t2_gammas[i])) \n\n# Count number of each gamma \nt2_gcount_dict = dict((x,opt_t2_gammas.count(x)) for x in set(opt_t2_gammas))\n \n# Find most frequent gamma value\nt2_gamma_consensus = max(t2_gcount_dict, key=t2_gcount_dict.get)\n\n#print(\"\\nOptimal gamma parameter after 2-tiered grid search: %s\" % t2_gamma_consensus)\n\n# Create tier 2 gamma list\ngamma_i_t2 = t1_gamma_list.index(t1_gamma_consensus)\n\n\n\n#Calculate and display time taken for script to run\nEndTime = (time.time() - StartTime)\nprint(\"\\nTime taken for script to run is %.2f seconds\\n\" % EndTime)","sub_path":"scripts/mltest/modeltest5.py","file_name":"modeltest5.py","file_ext":"py","file_size_in_byte":5672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"308567815","text":"from collections import defaultdict\nimport re\n\ndef train_unigram(input_file_path, model_file_path):\n\n\tcounts = defaultdict(lambda: 0)\n\ttotal_count = 0\n\tmodel_result = {}\n\n\twith open(input_file_path, 'r') as input_file:\n\t\tfor line in input_file:\n\t\t\tline = line.strip()\n\t\t\tline = re.sub(r'  ', '', line)\n\t\t\twords = line.split(\" \")\n\t\t\twords.append(\"</s>\") # end-of-sentence token\n\t\t\tfor word in words:\n\t\t\t\tcounts[word] += 1\n\t\t\t\ttotal_count += 1\n\n\twith open(model_file_path, 'w') as model_file:\n\t\tfor word, count in counts.items():\n\t\t\tprobability = float(count) / total_count\n\t\t\tmodel_result[word] = probability\n\t\t\tprint('{}\\t{}'.format(word, probability), file=model_file)\n\n","sub_path":"lifan/tutorial03/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"649723043","text":"#Precipher stuff just to 
prepare the text and analyse\n\nalphabet = 'abcdefghijklmnopqrstuvwxyz' # assumed definition: every helper below indexes into a 26-letter lowercase alphabet\n\n\ndef alphaShift(myString, shift):\n '''\n In Caesar shift style shifts every letter in myString along by shift amount.\n >>> alphaShift('abc', 2)\n 'cde'\n '''\n shift = shift % 26\n newString = ''\n for char in myString:\n if char.lower() in alphabet:\n newString += alphabet[(alphabet.index(char.lower()) + shift) % 26]\n else:\n newString += char\n return(newString)\n\n\ndef clean(myString):\n '''\n Removes all spaces, punctuation and capitals from myString\n '''\n newString = ''\n for char in myString:\n if char.lower() in alphabet:\n newString += char.lower()\n return(newString)\n\n\ndef popular(myString, strLength, limit):\n '''\n Finds the 'limit' most common ministrings in myString, in descending\n order of popularity.\n Returns a list where each element is [ministring, occurenceCount].\n strLength is the length of ministring to search for e.g. 3 letters\n\n >>> popular(myString, 3, 5)\n [['the', 20], ['and', 14], ['tha', 10], ['ate', 8], ['oot', 2]]\n\n '''\n\n # mydir will contain string:occurences for every strLength string.\n mydir = {}\n for index in range(len(myString) - strLength + 1):\n try:\n mydir[myString[index:index + strLength]] += 1\n except KeyError:\n mydir[myString[index:index + strLength]] = 1\n\n # Now to turn mydir into a list and sort by most occurences first.\n mylist = []\n for thing in mydir:\n mylist.append([thing, mydir[thing]])\n\n mylist.sort(key=lambda x: x[1], reverse=True)\n mylist = mylist[:limit]\n return(mylist)\n","sub_path":"precipher.py","file_name":"precipher.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"501714535","text":"from classes import util\n\nif __name__ == \"__main__\":\n    objPattern = util.Pattern(5, \"*\")\n    objPattern.square()\n    print (\"\\n\\n\\n\")\n    # objPattern = util.abc()\n    # objPattern.a()\n    objPattern.RightAngleTriangle()\n    print (\"\\n\\n\\n\")\n    objPattern.EquilateralTriangle()\n    print (\"\\n\\n\\n\")\n    objPattern.Diamond()\n    print (\"\\n\\n\\n\")\n    # objPattern = util.Pattern(5, \"*\")\n    objPattern.Circle()","sub_path":"learning/modules/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"541812552","text":"###\n### Author: Mikaeri Ohana\n### Date: 8/27/2019\n###\n\nimport crawler\nimport sys\nimport emoji\nimport telebot\n\ntoken = sys.argv[1]\n \nbot = telebot.TeleBot(token)\n\n@bot.message_handler(commands=['start', 'help'])\ndef send_welcome(message):\n bot.reply_to(message, emoji.emojize(\"Hi! Welcome to the Crawler Bot! \\nTell me the topics you like the most and I'll search it on Reddit and bring information to you! \\n\\n>>Just type '/NadaPraFazer topic_one' \\nExample: /NadaPraFazer cats \\n\\nAND SEE THE MAGIC! :crystal_ball: \\n\\nWant to see more topics at the same time?\\nType with semicolon! \\nExample: /NadaPraFazer cats;dogs;turtles\"))\n\n@bot.message_handler(commands=['NadaPraFazer'])\ndef send_data(message):\n subreddits = str(message.text.split(\" \")[1])\n try:\n answer = crawler.crawl(subreddits)\n except:\n answer = emoji.emojize(\"Something has happened. Our friends are working on it. We'll be right back! 
:thumbs_up: \")\n \n bot.reply_to(message, answer)\n\nbot.polling()","sub_path":"crawlers/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"32902728","text":"'''\n\n107. Binary Tree Level Order Traversal II\nGiven a binary tree, return the bottom-up level order traversal of its nodes' values. (ie, from left to right, level by level from leaf to root).\n\nFor example:\nGiven binary tree [3,9,20,null,null,15,7],\n 3\n / \\\n 9 20\n / \\\n 15 7\nreturn its bottom-up level order traversal as:\n[\n [15,7],\n [9,20],\n [3]\n]\n\n107. Binary Tree Level Order Traversal II\nGiven a binary tree, return the bottom-up level order traversal of its nodes' values, i.e. level by level from the leaves' level up to the root's level, left to right within each level.\n\nFor example:\nGiven binary tree [3,9,20,null,null,15,7],\n\n 3\n / \\\n 9 20\n / \\\n 15 7\nreturn its bottom-up level order traversal as:\n\n[\n [15,7],\n [9,20],\n [3]\n]\n\n'''\n\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def levelOrderBottom(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[int]]\n \"\"\"\n res = []\n def deep_traversal(root, deepth=0, res=[]):\n if not root:\n return\n if not res:\n res = [[root.val]]\n else:\n if len(res) < deepth + 1:\n res.append([root.val])\n else:\n res[deepth].append(root.val)\n deep_traversal(root.left, deepth + 1, res)\n deep_traversal(root.right, deepth + 1, res)\n return res\n res = deep_traversal(root, 0, res)\n return res[::-1] if res else []\n\n\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution1(object):\n def levelOrderBottom(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[int]]\n \"\"\"\n self.vals = []\n def dfs(root, deepth):\n if not root:\n return\n if deepth < len(self.vals):\n self.vals[deepth].append(root.val)\n else:\n self.vals.append([root.val])\n dfs(root.left, deepth+1)\n dfs(root.right, deepth+1)\n\n dfs(root, 0)\n\n return self.vals[::-1]","sub_path":"algorithms/leetcode-101-binaryTreeLevelOrderTraversal2.py","file_name":"leetcode-101-binaryTreeLevelOrderTraversal2.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"89917306","text":"from django import forms\nfrom .models import *\n\n\nclass BankForm(forms.ModelForm):\n class Meta:\n model = Bank\n fields = '__all__'\n widgets = {\n 'branch_name': forms.TextInput(attrs={'placeholder': 'Branch Name', 'class': 'addbanktxt'}),\n 'ifsc_code': forms.TextInput(attrs={'placeholder': 'IFSC Code', 'class': 'addbanktxt'}),\n 'branch_address': forms.Textarea(attrs={'placeholder': 'Address', 'class': 'addbanktxtar'}),\n 'branch_contact': forms.TextInput(attrs={'placeholder': 'Contact Number', 'class': 'addbanktxt'}),\n }\n\n\nclass AccountForm(forms.ModelForm):\n class Meta:\n model = Account\n exclude = ['bank']\n fields = '__all__'\n widgets = {\n 'account_no': forms.TextInput(attrs={'class': 'cstmrtxt'}),\n 'account_holder': forms.TextInput(attrs={'class': 'cstmrtxt'}),\n 'account_type': forms.TextInput(attrs={'class': 'cstmrtxt'}),\n 'pancard_no': forms.TextInput(attrs={'class': 'cstmrtxt'}),\n 'address': forms.Textarea(attrs={'class': 'cstmrtxtarea'}),\n 'contact': forms.TextInput(attrs={'class': 'cstmrtxt'}),\n }\n\n\nclass TransactionForm(forms.ModelForm):\n class Meta:\n model = Transaction\n exclude = ['account', 'transaction_id']\n
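 # With exclude combined with fields = '__all__' (next line), Django first\n # takes every model field and then drops the excluded ones, so 'account'\n # and 'transaction_id' never reach the rendered form. A hypothetical\n # usage sketch for the view layer:\n # form = TransactionForm(request.POST)\n # if form.is_valid():\n # txn = form.save(commit=False) # attach account / transaction_id here\n # txn.save()\n 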
fields = '__all__'\n widgets = {\n 'transaction_type': forms.Select(attrs={'class': 'cstmrtxt'}),\n 'account_holder': forms.TextInput(attrs={'class': 'cstmrtxt'}),\n 'amount': forms.TextInput(attrs={'class': 'cstmrtxt'}),\n 'date': forms.DateInput(attrs={'class': 'cstmrtxt', 'type': 'date'}),\n 'time': forms.TimeInput(attrs={'class': 'cstmrtxt', 'type': 'time'}),\n }\n\n\nclass LoginForm(forms.Form):\n username = forms.CharField(max_length=50, widget=forms.TextInput(\n attrs={'placeholder': 'Username', 'class': 'addbanktxt'}))\n password = forms.CharField(max_length=50, widget=forms.PasswordInput(\n attrs={'placeholder': 'Password', 'class': 'addbanktxt'}))\n","sub_path":"Bankapp/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"414785650","text":"from django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils.decorators import method_decorator\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\nfrom .utils import movie_data, paginate, get_rating, HOSTNAME, HEADERS\nfrom ..models import Rating\nimport requests, json\n\n\ndef get_related_movies(tmdb_id):\n url='http://{}:8000/sim?movie_id={}'.format(HOSTNAME, tmdb_id)\n res = requests.get(\n url= url,\n headers= HEADERS,\n )\n return json.loads(res.content)['movies']\n\n@login_required\ndef render_template(request, num=0):\n tmdb_ids = get_related_movies(str(num))\n related = [get_rating(movie_data[k]) for k in tmdb_ids if k in movie_data]\n try:\n rated = Rating.objects.filter(movie=num, user=request.user).last().score\n except:\n rated = 0\n\n context = {\n 'rated': rated,\n 'current': get_rating(movie_data.get(str(num))),\n 'related': paginate(request, related, 4)\n }\n return render(request, 'pages/details.j2', context)\n\n\n\n@method_decorator(csrf_exempt, name='dispatch')\ndef add_rating(request):\n user_id = request.user.id\n rating = request.POST.get('rating')\n movie_id = request.POST.get('movie_id')\n try:\n r = Rating.objects.get(user=request.user, movie=movie_id)\n r.score=rating\n r.save()\n except:\n r = Rating(user=request.user, movie=movie_id, score=rating)\n r.save()\n print(user_id, rating, movie_id)\n url='http://{}:8000/add_rating'.format(HOSTNAME)\n data = {'user_id': user_id, 'movie_id': movie_id, 'rating': rating}\n res = requests.post(url=url, data=data)\n return JsonResponse({})\n","sub_path":"movies/views/details.py","file_name":"details.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"240435409","text":"\"\"\"\nTests several signal classes\n\"\"\"\n\n# python-native modules\n\n# third-party modules\nfrom numpy import arange\n\nfrom matplotlib import pyplot\n\n# project modules\nfrom simulator.backend.mathsignals.sinusoidal import SinusoidalViewModel\nfrom simulator.backend.mathsignals.step import StepViewModel\nfrom simulator.backend.mathsignals.square import SquareViewModel\nfrom simulator.backend.mathsignals.triangle import TriangleViewModel\n\n\ndef main():\n # Instantiate signal classes\n sinusoidal = SinusoidalViewModel()\n square = SquareViewModel()\n step = StepViewModel()\n triangle = TriangleViewModel()\n\n # Front-end will ask signal helper for required properties\n #\n # The given properties format:\n # properties = {\n # \"Amplitude\" : Property\n # }\n #\n # SignalProperty is well-known 
class in the project's modules\n # and you can read members \"property_type\" and \"property_options\" like:\n # + properties[\"Amplitude\"].property_type\n # + properties[\"Amplitude\"].property_options\n # Where property_types will be a PropertyType: String, Float, Options\n # and property_options will be a list with the available values that the property\n # can take\n #\n sin_properties = sinusoidal.get_properties()\n step_properties = step.get_properties()\n square_properties = square.get_properties()\n triangle_properties = triangle.get_properties()\n\n # Front-end action happens and properties are set, checked\n # and sent to the back-end who will evaluate signals\n sinusoidal[\"Amplitude\"].set(10.0)\n sinusoidal[\"Frequency\"].set(500)\n sinusoidal[\"Phase\"].set(0)\n\n step[\"Amplitude\"].set(10.0)\n\n triangle[\"Amplitude\"].set(10.0)\n triangle[\"Frequency\"].set(500)\n triangle[\"Duty\"].set(30)\n\n square[\"Amplitude\"].set(10.0)\n square[\"Frequency\"].set(500)\n square[\"Duty\"].set(30)\n\n # Check if properties are valid for the signal\n if sinusoidal:\n time_interval = arange(-1/100, 1 / 100, 1 / 1000000)\n\n # This will be done by the back-end\n sin_values = sinusoidal.get_model()(time_interval)\n\n # Values are returned to the front-end who plots the output\n pyplot.plot(time_interval, sin_values, label=\"Sinusoidal\")\n\n # Check if properties are valid for the signal\n if step:\n time_interval = arange(-1/100, 1 / 100, 1 / 1000000)\n\n # This will be done by the back-end\n step_values = step.get_model()(time_interval)\n\n # Values are returned to the front-end who plots the output\n pyplot.plot(time_interval, step_values, label=\"Step\")\n\n # Check if properties are valid for the signal\n if square:\n time_interval = arange(-1/100, 1 / 100, 1 / 1000000)\n\n # This will be done by the back-end\n square_values = square.get_model()(time_interval)\n\n # Values are returned to the front-end who plots the output\n pyplot.plot(time_interval, square_values, label=\"Square\")\n\n # Check if properties are valid for the signal\n if triangle:\n time_interval = arange(-1/100, 1 / 100, 1 / 1000000)\n\n # This will be done by the back-end\n triangle_values = triangle.get_model()(time_interval)\n\n # Values are returned to the front-end who plots the output\n pyplot.plot(time_interval, triangle_values, label=\"Triangle\")\n\n pyplot.legend()\n pyplot.show()\n\n\nmain()\n","sub_path":"tests/test_signals.py","file_name":"test_signals.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"409063452","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport numpy as np\nfrom scipy import integrate\n\nimport sympy as sp\nsp.init_printing(use_unicode=True)\n\n \ndef stopFunCombined(t, s, lst, events, out=[]):\n terminal = False\n cur_ivs = []\n sn = s.shape[0] + 1\n for event in events:\n if(type(event)!=str):\n ivar = event.ivar \n cur_iv = ivar(t, s)\n cur_ivs.append(cur_iv)\n\n if not lst: # fast way to check if lst is empty\n cur_cnt = []\n for event in events:\n if(type(event)!=str):\n cur_cnt.append(event.count)\n if not out:\n out.append(cur_cnt)\n else:\n out[0] = cur_cnt\n lst.append([*s,t,*cur_ivs])\n return 0\n lst.append([*s,t,*cur_ivs])\n cur_cnt = out[0]\n \n for i, event in enumerate(events):\n if(type(events[i])!=str):\n \n stopval=event.stopval\n direction=event.direction\n corr=event.corr\n isterminal=event.isterminal\n init_cnt=event.count\n \n cur_iv = cur_ivs[i]\n 
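 # Crossing detection: lst[-2] is the previous accepted integration step,\n # and columns sn, sn+1, ... of each stored row hold the event variables,\n # so comparing the previous and current values against stopval (the f1/f2\n # flags below) catches a crossing in the requested direction. E.g. a\n # hypothetical event with ivar(t, s) = s[0] and stopval = 0 fires when\n # the first state component changes sign.\n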
prev_iv = lst[-2][sn+i]\n\n f1 = (prev_iv < stopval) and (cur_iv > stopval) and ((direction == 1) or (direction == 0))\n f2 = (prev_iv > stopval) and (cur_iv < stopval) and ((direction == -1) or (direction == 0))\n if (f1 or f2) and ((cur_cnt[i] == -1) or (cur_cnt[i] > 0)):\n if cur_cnt[i] > 0:\n cur_cnt[i] -= 1\n \n out.append([i, # event index\n (-1 if cur_cnt[i]==-1 else init_cnt-cur_cnt[i]), # event trigger counter\n lst[-2].copy(), # state before event\n lst[-1].copy(), # state after event\n corr, # if correction is needed\n isterminal]) # if event is terminal\n if isterminal and ((cur_cnt[i] == -1) or (cur_cnt[i] == 0)):\n terminal = True\n \n if terminal:\n return -1\n \n return 0\n\nclass integrator_tool:\n def __init__(self, rtol, nmax, method=None, stopf=None):\n \n self.rtol = float(rtol) \n self.nmax = float(nmax) \n self.method = method \n self.atol = float(rtol)\n self.stopf = stopf\n \n def integrate_ode(self, model, s0, tspan, events=[], out=[]):\n retarr=True\n mu = model.mu1\n prop = integrate.ode(model.equation)\n\n if self.method is not None:\n prop.set_integrator(self.method, rtol=self.rtol)\n else:\n prop.set_integrator('dopri5')\n\n prop.set_initial_value(s0, tspan[0])\n prop.set_f_params(*[mu])\n\n lst = []\n if self.stopf is not None:\n prop.set_solout(lambda t, s: self.stopf(t, s, lst, events, out))\n else:\n \n prop.set_solout(lambda t, s: stopFunCombined(t, s, lst, events, out))\n prop.integrate(tspan[1])\n del prop\n if len(out) > 0:\n cor_out = self.correctEvents(model, events, out, None, sn=len(s0))\n out.clear()\n out.extend(cor_out)\n \n if retarr:\n \n return np.asarray(lst)\n \n \n def correctEvents(self, model, events, evout, prop, sn):\n out = []\n tol=1e-14\n maxiter = 50\n for ev in evout[1:]:\n if ev[4] == False:\n out.append([ev[0], ev[1], ev[3][:sn+1], ev[5]])\n continue\n t, s = self.brent(model, events[ev[0]], ev[2][sn], ev[3][sn], ev[2][:sn], tol=tol, maxiter=maxiter)\n out.append([ev[0], ev[1], list(s)+[t], ev[5]]) \n return out \n\n def brent(self, model, event, t0, t1, s0, tol=1e-12, maxiter=50, debug=False):\n import scipy.optimize\n import math\n ivar = event.ivar\n stopval = event.stopval \n s_opt = [0]\n \n def fopt(t, s0, t0):\n if t == t0:\n s = s0.copy()\n else:\n s = model.integrator.integrate_ode(model, s0, [t0, t])[-1, :6]\n s_opt[0] = s\n fval = ivar(t, s) - stopval\n return math.fabs(fval)\n \n t_opt = scipy.optimize.brent(fopt, args=(s0, t0), brack=(t0, t1), tol=tol)\n\n return t_opt, s_opt[0] \n\n \n ","sub_path":"orbipyd/integrator.py","file_name":"integrator.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"176222840","text":"import sys\nimport numpy as np\nimport PlotterExceptions as pe\nfrom plotter import Plotter\n\n\nclass JgalileePlotter(Plotter):\n\n def __init__(self, output_filename, data_filename, points_per_cluster, figure_filename, indian_pines=False):\n \"\"\"set points_per_cluster != 0 to plot an artificial data set.\n set indian_pines = True to plot Indian Pines data set.\n points_per_cluster != 0 if and only if indian_pines=False.\"\"\"\n if points_per_cluster == 0 and indian_pines is False:\n raise Exception(\"Cannot construct an object with indian_pines=False and points_per_cluster=0\")\n super().__init__(figure_filename)\n self.__output__ = output_filename\n self.__data__ = data_filename\n self.__points_per_cluster__ = points_per_cluster\n self.__indian_pines__ = 
indian_pines\n self.__ip_spectral_centers__ = None\n\n def __read_artificial_centers__(self, content):\n centers = []\n\n lines = content.split('\\n')\n for l in lines:\n components = l[2:].split()\n centers.append([float(components[0]), float(components[1])])\n\n return centers\n\n def __read_indian_pines_centers__(self, content):\n centers = []\n self.__ip_spectral_centers__ = []\n\n lines = content.split('\\n')\n for l in lines:\n components = l[2:].split()\n centers.append([float(components[0]), float(components[1])])\n self.__ip_spectral_centers__.append([float(components[i]) for i in range(2, 202)])\n\n return centers\n\n def __read_artificial_points__(self, content):\n data = []\n\n lines = content.split('\\n')\n cluster = []\n i = 0\n for l in lines:\n if i == self.__points_per_cluster__:\n data.append(list(cluster))\n cluster.clear()\n i = 0\n components = l.split()\n cluster.append([float(components[0]), float(components[1])])\n i = i + 1\n data.append(list(cluster))\n\n return data\n\n def __read_indian_pines_points__(self, content):\n data = []\n for i in range(len(self.__ip_spectral_centers__)):\n data.append([])\n\n lines = content.split('\\n')\n for l in lines:\n components = l.split()\n spectral_part = [float(components[i]) for i in range(2, 202)]\n pixel_attr = [float(components[i]) for i in range(0, 2)]\n belonging_cluster = self.__nearest_center__(spectral_part)\n data[belonging_cluster].append(pixel_attr)\n\n return np.array(data)\n\n def __nearest_center__(self, spectral_part):\n dists = []\n for c in self.__ip_spectral_centers__:\n dists.append(self.__distance__(spectral_part, c))\n return dists.index(min(dists))\n\n def __distance__(self, point1, point2):\n \"\"\"squared euclidean distance\"\"\"\n sum = 0\n for p1, p2 in zip(point1, point2):\n sum += (p1 - p2)**2\n return sum\n\n def __read_points__(self, content):\n if self.__indian_pines__:\n return self.__read_indian_pines_points__(content)\n else:\n return self.__read_artificial_points__(content)\n\n def __read_centers__(self, content):\n if self.__indian_pines__:\n return self.__read_indian_pines_centers__(content)\n else:\n return self.__read_artificial_centers__(content)\n\n def __read__(self):\n with open(self.__data__, 'r') as f:\n data_file_content = f.read()\n with open(self.__output__, 'r') as f:\n centrois_file_content = f.read()\n centrois_file_content = centrois_file_content[:len(centrois_file_content)-1]\n\n centers = self.__read_centers__(centrois_file_content)\n data = self.__read_points__(data_file_content[:len(data_file_content)-1])\n\n if len(data) != len(centers):\n raise pe.InconsistentDataset()\n\n return np.array(data), np.array(centers)\n\ndef print_help():\n print(\"Program USAGE: [CENTERS] [DATA] [PTS] [OUTPUT] [ARTIFICIAL]\")\n print(\"\\t[CENTERS]: clustering resulting centers\")\n print(\"\\t[DATA]: dataset\")\n print(\"\\t[PTS]: points per cluster (ARTIFICIAL has to be True iff PTS is not equal 0)\")\n print(\"\\t[OUTPUT]: output filename\")\n print(\"\\t[ARTIFICIAL]: True if the data set is artificial (implies PTS not equal 0), False otherwise\")\n\ndef jgalilee_plotter_main():\n try:\n #output_filename, data_filename, points_per_cluster, figure_filename, indian_pines=False\n if sys.argv[1] == '-h' or sys.argv[1] == '--help' or sys.argv[1] is None:\n print_help()\n return\n if sys.argv[5] == 'True':\n plotter = JgalileePlotter(sys.argv[1], sys.argv[2], int(sys.argv[3]), sys.argv[4], False)\n plotter.plot()\n elif sys.argv[5] == 'False':\n plotter = JgalileePlotter(sys.argv[1], sys.argv[2], int(sys.argv[3]), sys.argv[4], True)\n plotter.plot()\n else:\n print_help()\n\n except pe.PlotGeneratorError as e:\n print(e)\n except Exception as e:\n print(e)\n\n\njgalilee_plotter_main()\n","sub_path":"ChangeDetectionPipeline/programs/jgalileePlotter.py","file_name":"jgalileePlotter.py","file_ext":"py","file_size_in_byte":5169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"7355436","text":"from django.shortcuts import render\nfrom .utils import render_to_react\n\n\n# def index(request):\n#     return render(request, \"index.html\")\n\n\n# Render to Django HTML & React.js JSX\n\ndef index(request):\n\n props = {\n \"name\": \"pato\",\n \"more\": \"Ken\"\n }\n\n template_name = \"index-jsx.html\"\n js_app_name = \"index.bundle.js\"\n\n return render_to_react(\n request,\n template=template_name,\n js_app_name=js_app_name,\n props=props\n )\n","sub_path":"02-1_djReact/server/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"549763822","text":"from flask import Flask,request, jsonify, make_response\nimport pymysql\nimport json\n\napp = Flask(__name__)\n\nconn = pymysql.connect(host=\"localhost\", port=3306, user=\"root\", passwd=\"root\", db=\"projectii\")\ncursor = conn.cursor()\n\n@app.route('/yhmpd',methods=['POST','GET'])\ndef yhmpd():\n NAME = str(request.args.get('yhm'))\n yhm = 'select * from Users where NAME = \"%s\"' % (NAME)\n yhm_res = cursor.execute(yhm)\n\n responsetext = {\n \"statusCode\": 200,\n \"data\": yhm_res\n }\n response = make_response(jsonify(responsetext))\n response.headers['Access-Control-Allow-Origin'] = '*'\n response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'\n response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'\n return response\n\n@app.route('/yhdl',methods=['POST','GET'])\ndef yhdl():\n PSWORD = str(request.args.get('mm'))\n NAME = str(request.args.get('yhm'))\n sql = 'select * from Users where NAME = \"%s\" and PSWORD=\"%s\"' % (NAME, PSWORD)\n res = cursor.execute(sql)\n responsetext = {\n \"statusCode\": 200,\n \"data\": res\n }\n response = make_response(jsonify(responsetext))\n response.headers['Access-Control-Allow-Origin'] = '*'\n response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'\n response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'\n return response\n\n@app.route('/yhzc',methods=['POST','GET'])\ndef yhzc():\n PSWORD = str(request.args.get('mm'))\n NAME = str(request.args.get('yhm'))\n sqlname = 'select * from Users where NAME = \"%s\"' % (NAME)\n sql_name = cursor.execute(sqlname)\n have_add = 2\n if (sql_name == 0):\n sql = \"insert into Users(NAME,PSWORD) values(%s,%s)\"\n cursor.execute(sql, [NAME, PSWORD])\n # cursor.execute(sql,(user,pwd))\n conn.commit()\n sqlname = 'select * from Users where NAME = \"%s\"' % (NAME)\n sql_name = cursor.execute(sqlname)\n if(sql_name == 1):\n have_add = 1\n else:\n have_add = 0\n elif(sql_name == 1):\n have_add = 2\n # return str(have_add)\n responsetext = {\n \"statusCode\": 200,\n \"data\": have_add\n }\n response = make_response(jsonify(responsetext))\n response.headers['Access-Control-Allow-Origin'] = '*'\n response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'\n response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'\n return response\n\n@app.route('/jf',methods=['POST','GET'])\ndef jf():\n NAME = 
str(request.args.get('yhm'))\n name_choose = 'select * from Scores where NAME = \"%s\"' % (NAME)\n cursor.execute(name_choose)\n res = cursor.fetchone()\n print(res)\n if (res == None):\n print(\"首次积分\")\n sql = \"insert into Scores(NAME,SCORES) values(%s,%s)\"\n cursor.execute(sql, [NAME, 0])\n conn.commit()\n\n name_choose = 'select * from Scores where NAME = \"%s\"' % (NAME)\n cursor.execute(name_choose)\n res = cursor.fetchone()\n print(res[1])\n name_choose_add = \"update Scores set SCORES='%s' where NAME='%s'\" % (res[1] + 10, NAME)\n cursor.execute(name_choose_add)\n conn.commit()\n cursor.execute(name_choose)\n res = cursor.fetchone()\n # return str(res[1])\n\n print(res[1])\n\n\n responsetext = {\n \"statusCode\": 200,\n \"data\": res[1]\n }\n response = make_response(jsonify(responsetext))\n response.headers['Access-Control-Allow-Origin'] = '*'\n response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'\n response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'\n return response\n\n\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"归档/PJ__7_Flask_Ajax/Python/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"362087599","text":"import turtle,operator\nwid = 20#柱状宽度\ndivi = 0.5#设置单位柱状高度与单词\nk = 1300#屏幕长\nh = 700#屏幕宽\nx1 = 600#x轴长\ndef post(x,word):#绘制位置,绘制单词(数量,单词)\n\tturtle.penup()\n\tturtle.goto(x,-h/2 + 100)\n\tturtle.pendown() \n\tturtle.color(\"red\")\n\tturtle.seth(90)\n\tturtle.fd(word[0] * divi)\n\tturtle.write(word[0])\n\tturtle.seth(0)\n\tturtle.fd(wid)\n\tturtle.seth(-90)\n\tturtle.fd(word[0] * divi)\n\tturtle.penup()\n\tturtle.fd(15)\n\tturtle.seth(180)\n\tturtle.fd(wid)\n\tturtle.pendown()\n\tturtle.write(word[1])\n\ndef graphy(words):\n\tturtle.title(\"词词频统计\")\n\tturtle.setup(k,h,0,0)#设置屏幕大小\n\t#绘制x,y轴\n\tturtle.color(\"blue\")\n\tturtle.pensize(5)\n\tturtle.penup()\n\tturtle.seth(180)\n\tturtle.fd(k/2 - 100)\n\tturtle.seth(-90)\n\tturtle.fd(h/2 - 100)\n\tturtle.pendown()\n\tturtle.seth(0)\n\tturtle.fd(x1)#x轴长度\n\tturtle.write(\"x\")\n\tturtle.seth(180)\n\tturtle.fd(x1)\n\tturtle.seth(90)\n\tturtle.fd(300)#y轴长度\n\tturtle.write(\"y\")\n\tfor i in range(10):#控制绘制出排名前几的单词\n\t\tpost(-k/2 + 100 + wid + wid * i * 2,words[-(i + 1)])\n\tturtle.done()\ndef main():\n\tfilename = input(\"请输入文件名称 : \").strip()\n\tf1 = open(filename,\"r\")\n\tstr = []\n\tfor line in f1:\n\t\tfor i in line[:-1]:\n\t\t\tif i in \"~@#$%^&*()_-+=<>?/,.:;{}[]|\\'1234567890\"\"\" :\n\t\t\t\tline = line[:-1].replace(i,' ')\n\t\tstr = str + line[:-1].lower().split()\n\tcount = {}\n\tfor word in str:\n\t\tcount.setdefault(word,0)\n\t\tcount[word] = count[word] + 1\n\t#pairs = list(count.items())\n\t#words = [[x,y]for (y,x)in pairs]\n\t#import opeartor word.sort(key = operator.itemgetter(1))\n\twords = [[x,y]for (y,x)in list(count.items())]\n\twords.sort()\n\tgraphy(words)\n\nmain()\n#artical.txt\n#当改变一个数值后关系到多个表达式,则用变量表示","sub_path":"python/python学习/字典/词汇统计.py","file_name":"词汇统计.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"513026118","text":"import numpy as np\nimport pandas as pd\nimport json\nfrom sklearn.naive_bayes import *\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\n\n\nwith open('train.json') as data_file:\n\ttrain_data = 
json.load(data_file)\n\nwith open('test.json') as data_file1:\n\ttest_data = json.load(data_file1)\n\ndef countmatrix(dict,num_cuisines,num_ingredients,cuisines,ingredients,train_data):\n\n\tcount_matrix = np.zeros((num_cuisines,num_ingredients))\n\n\trow=0\n\n\tfor cuisine in cuisines:\n\t\tingredients_per_cuisine = dict[cuisine]\n\n\t\tfor ingred in ingredients_per_cuisine:\n\t\t\t\n\t\t\tcolumn=ingredients.index(ingred)\n\t\t\tcount_matrix[row,column] = count_matrix[row,column]+1\n\n\t\trow += 1\n\n\treturn count_matrix\n\n\n\n\n#step1: Create ingredients for each cuisine\ndef ingred_for_cuisine(data):\n\tcuisine_and_ingred={}\n\t#all cuisines\n\tcuisines=[]\n\t# all ingredients\n\tingredients=[]\n\n\tfor i in range(len(data)):\n\t\tcuisine = data[i]['cuisine']\n\t\tingred_per_cuisine = data[i]['ingredients']\n\n\t\tif cuisine not in cuisine_and_ingred.keys():\n\t\t\tcuisine_and_ingred[cuisine] = ingred_per_cuisine\n\t\t\tcuisines.append(cuisine)\n\t\telse:\n\t\t\tcuisine_and_ingred[cuisine].extend(ingred_per_cuisine)\n\n\t\tingredients.extend(ingred_per_cuisine)\n\n\t#unique ingredients\n\tingredients = list(set(ingredients))\n\tnum_cuisines = len(cuisines)\n\tnum_ingredients = len(ingredients)\n\n\treturn cuisine_and_ingred,num_cuisines,num_ingredients,cuisines,ingredients\n\n\n\n\n\nif __name__ == \"__main__\":\n\tX_dictCuisineIngred , num_cuisines,num_ingredients,cuisines,ingredients =ingred_for_cuisine(train_data)\n\tX_countmatrix = countmatrix(X_dictCuisineIngred,num_cuisines,num_ingredients,cuisines,ingredients,train_data)\n\t#print X_countmatrix[0]\n\t#fopen(\"result.csv\",\"w\")\n\t\n\t\n\tX_test =[[0 for x in range(num_ingredients)] for y in range(len(test_data))]\n\n\trow1=0\n\tfor i in range(len(test_data)):\n\t\tfor row in test_data[i]['ingredients']:\n\t\t\tif row in ingredients:\n\t\t\t\tcolumn1=ingredients.index(row)\n\t\t\t\tX_test[row1][column1] += 1\n\t\trow1 += 1\n\n\tclf = MultinomialNB()\n\t#print type(X_countmatrix)\n\t#print type(cuisines)\n\tclf.fit(X_countmatrix,cuisines)\n\ty_test = clf.predict(X_test)\n\tf = open('results.csv','w')\n\tf.write(u'id,cuisine\\n')\n\tfor i in range(len(test_data)):\n\t\tf.write('%s,%s\\n'% (test_data[i]['id'],y_test[i]))\n\tf.close()\n","sub_path":"CS16BTECH11015_AML/Classification/Q4_2.py","file_name":"Q4_2.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"354785363","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 7 11:51:59 2018\n\n@author: Aeenf\n\"\"\"\nimport collections\nimport json\nimport logging\nimport random\nimport time\nfrom datetime import datetime\n\nimport pandas as pd\nimport requests\n\n\nclass DataSave(object):\n def __init__(self, pathfile):\n self.filepath = pathfile\n self.datenw = datetime.now().strftime('%m%d')\n self.get_log()\n\n def get_log(self):\n '''通过使用logging包将程序运行出错的地方进行记录'''\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename='D:\\\\爬虫日志\\\\Tmall_detail_download.log',\n filemode='w')\n\n def setheaders(self, item_id):\n headers = {\n 'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1',\n 'Referer': 'https://detail.m.tmall.com/item.htm?id={}'.format(item_id)\n }\n return headers\n\n def data_change(self, shop_name):\n '''从给定的文件里获取我想要的所有的URL'''\n shopname_url = 
collections.defaultdict()\n itemid_lst = []\n with open(self.filepath, encoding='utf-8') as f:\n for row in f.readlines():\n shopname_url[row.split('\\t')[1]] = row.split('\\t')[0]\n for one in shopname_url.items():\n if one[1] == shop_name:\n itemid_lst.append(one[0].strip('\\n'))\n return itemid_lst\n\n def get_shop_name(self):\n shopname_url = collections.defaultdict()\n with open(self.filepath, encoding='utf-8') as f:\n for row in f.readlines():\n shopname_url[row.split('\\t')[1]] = row.split('\\t')[0]\n shopname_lst = set(shopname_url.values())\n return shopname_lst\n\n def get_requests(self, itemid):\n header = self.setheaders(itemid)\n datenum = int((datetime.now()-datetime.strptime('1970-01-01 00:00:00', '%Y-%m-%d %H:%M:%S')).total_seconds()*1000)\n get_url = 'https://h5api.m.taobao.com/h5/mtop.taobao.detail.getdetail/6.0/?jsv=2.4.8&appKey=12574478&t={0}&sign=6298b3c5128b9220debf6963eb1d7873&api=mtop.taobao.detail.getdetail&v=6.0&dataType=jsonp&ttid=2017%40taobao_h5_6.6.0&AntiCreep=true&type=jsonp&callback=mtopjsonp2&data=%22itemNumId%22%3A%22{1}%22'.format(datenum, itemid)\n with requests.Session() as s:\n s.get(header['Referer'], headers=header)\n res = s.get(get_url, headers=header, timeout=30)\n time.sleep(random.random()*5)\n return json.loads(res.text[11:-1])\n\n def basic_info(self, json_data, item_id):\n \"\"\"对获取的数据进行规整\"\"\"\n # title = []\n contents = {}\n # 获取基本信息,商品详细介绍\n basic_info = json_data['data']['props']['groupProps'][0]['基本信息'] #列表��含字典\n for one_dict in basic_info:\n for x, y in one_dict.items():\n contents[x] = y\n api_item = json_data['data']['item']\n try:\n moduleDescParams_f = api_item['moduleDescParams']['f']\n api_item.pop('moduleDescParams')\n except KeyError as e:\n print('\\t', e)\n moduleDescParams_f = ''\n api_item.pop('exParams')\n api_item.pop('countMultiple')\n image_urls = api_item['images']\n image_url_str = ','.join(image_urls)\n api_item.update({'images': image_url_str})\n basic_info_df = pd.DataFrame(contents, index=[0])\n basic_info_df['itemId'] = str(item_id)\n api_item_df = pd.DataFrame(api_item, index=[0])\n result_df = pd.merge(basic_info_df, api_item_df, how='outer', on=['itemId'])\n result_df['moduleDescParams_f'] = moduleDescParams_f\n return result_df\n\n def get_sell_price(self, json_data):\n sell_counts = json.loads(json_data['data']['apiStack'][0]['value'])\n try:\n transmitPrice = sell_counts['price']['transmitPrice']['priceText']\n except Exception as e:\n transmitPrice = ''\n try:\n oprice = sell_counts['price']['extraPrices'][0]['priceText']\n except KeyError as e:\n print('\\t没有促销活动')\n oprice = ''\n try:\n sellCount = sell_counts['item']['sellCount']\n except KeyError:\n sellCount = ''\n if sell_counts['item']['videos']:\n sell_video = sell_counts['item']['videos'][0]['url']\n else:\n print('\\t没有宣传视频')\n sell_video = ''\n try:\n item_id = sell_counts['item']['itemId']\n except KeyError as e:\n print(e, '获取视频链接时出现异常')\n result = dict(zip(['itemId', 'transmitPrice', 'oprice', 'sellCount', 'sell_video'], [item_id, transmitPrice, oprice, sellCount, sell_video]))\n result_df = pd.DataFrame(result, index=[0])\n return result_df\n\n def get_time_count(self, json_data):\n try:\n sell_counts = json.loads(json_data['data']['apiStack'][0]['value'])\n startTime = sell_counts['vertical']['jhs']['startTime']\n endTime = sell_counts['vertical']['jhs']['endTime']\n soldCount = sell_counts['vertical']['jhs']['soldCount']\n except KeyError:\n startTime = endTime = soldCount = ''\n result = dict(zip(['startTime', 'endTime', 'soldCount'], 
[startTime, endTime, soldCount]))\n return result\n\n def get_keywords(self, json_data):\n try:\n keywords = json_data['data']['rate']['keywords']\n keyword_count = ['-'.join([i['word'], i['count']]) for i in keywords]\n str_keyword = ','.join(keyword_count)\n return str_keyword\n except KeyError as e:\n print('\\t', e, '没有关键词评语')\n return ''\n\n def main(self):\n # 获取所有的店铺列表\n for one_shopname in self.get_shop_name():\n # 通过传入一个店铺名称,获取他相关的宝贝id\n itemid_lst = self.data_change(one_shopname)\n print('{}共有{}个宝贝待访问'.format(one_shopname, len(itemid_lst)))\n info_result = []\n num = 1\n for one_itemid in itemid_lst:\n print('\\t{} {} {}正在访问中'.format(num, one_shopname, one_itemid))\n # 对一个id进行访问,获取所有的相关信息\n json_data = self.get_requests(one_itemid)\n basic_info = self.basic_info(json_data, one_itemid)\n str_keywords = self.get_keywords(json_data)\n price_video = self.get_sell_price(json_data)\n last_df = pd.merge(basic_info, price_video,how='outer', on=['itemId'])\n last_df['keywords'] = str_keywords\n print('\\t{} {} {}保存成功\\n'.format(num, one_shopname, one_itemid))\n # 创建一个以店铺为名字的csv文件,并返回文件名\n info_result.append(last_df)\n num += 1\n result_basic_info = pd.concat(info_result, sort=False, ignore_index=True)\n result_basic_info.to_excel('D:\\\\Python\\\\TmallCrawler\\\\file\\\\190504\\\\红色小象\\\\{}_{}.xlsx'.format(one_shopname, self.datenw))\n print('{}保存成功\\n'.format(one_shopname))\n return 'all is over'\n\nif __name__ == '__main__':\n pfile = \"D:\\\\Python\\\\TmallCrawler\\\\file\\\\name_itemid_giving\"\n tm = DataSave(pfile)\n tm.main()\n\n","sub_path":"TmallCrawler/Balabala/new_tmall_detail.py","file_name":"new_tmall_detail.py","file_ext":"py","file_size_in_byte":7690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"419293724","text":"# Create your views here.\n\n#creacion usuario\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.http.response import HttpResponseRedirect\nfrom django.shortcuts import render_to_response\nfrom django.template.context import RequestContext\n\n\n#creacion usuario\ndef nuevo_usuario(request):\n if request.method=='POST':\n formulario= UserCreationForm(request.POST)\n if formulario.is_valid:\n formulario.save()\n return HttpResponseRedirect('/')\n else:\n formulario = UserCreationForm()\n return render_to_response('nuevousuario.html',{'formulario':formulario},context_instance=RequestContext(request)) \n\n\n#Autenticacion usuarios\ndef ingresar(request):\n if not request.user.is_anonymous():\n return HttpResponseRedirect('/privado')\n if request.method == 'POST':\n formulario = AuthenticationForm(request.POST)\n if formulario.is_valid:\n usuario = request.POST['username']\n clave = request.POST['password']\n acceso = authenticate(username=usuario, password=clave)\n if acceso is not None:\n if acceso.is_active:\n login(request, acceso)\n return HttpResponseRedirect('/privado')\n else:\n return render_to_response('noactivo.html', context_instance=RequestContext(request))\n else:\n return render_to_response('nousuario.html', context_instance=RequestContext(request))\n else: \n formulario = AuthenticationForm()\n return render_to_response('ingresar.html', {'formulario':formulario}, context_instance=RequestContext(request))\n\n#Acceso restringido\n@login_required(login_url='/ingresar')\ndef privado(request):\n usuario = 
request.user\n    return render_to_response('privado.html', {'usuario':usuario}, context_instance=RequestContext(request))\n\n#cierre de sesion\n@login_required(login_url='/ingresar')\ndef cerrar(request):\n    logout(request)\n    return HttpResponseRedirect('/')\n\n \n ","sub_path":"Biblioteca2/principal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"383633930","text":"import pandas as pd\n\ndef aver(inF):\n    df = pd.read_table(inF, header=0)\n    df2 = df.ix[:,[0,1]].copy()\n    for i in range(2, df.shape[1], 2):\n        s = df.columns[i].split('_')\n        sample = '_'.join(s[0:-1])\n        df2[sample] = (df.ix[:,i] + df.ix[:, i+1])/2.0\n\n    df2.to_csv(inF + '-AverRep', sep='\t', index=False)\naver('mTECs-GeneCountSymbol-Normalized')\naver('mTECs-PeakCounts-Table-Gene-Norm')\n\n","sub_path":"mTECs/22-ATACSeq-RNASeq/01-Cor/PeakVsNoPeak/02-average-replicates.py","file_name":"02-average-replicates.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"75767056","text":"a = \"\"\"toggle 322,558 through 977,958\n\"\"\"\nlights = []\nf = []\n\nfor n in range(1000):\n    f.append(0)\n\nfor n in range(1000):\n    # append a copy of f: appending f itself would make every row alias\n    # the same list, so toggling one cell would toggle the whole grid column\n    lights.append(list(f))\n\ndef turn_on(x,y):\n    lights[x][y] = 1\n    return\n\ndef turn_off(x,y):\n    lights[x][y] = 0\n    return\n\ndef toggle(x,y):\n    if lights[x][y] == 0:\n        lights[x][y] = 1\n        return\n    elif lights[x][y] == 1:\n        lights[x][y] = 0\n        return\n    else:\n        print(\"error\")\n\nb = a.split(\"\\n\")\nb.remove(\"\")\nfor n in range(len(b)):\n    b[n] = b[n].replace(\"turn off\", \"turn_off\")\n    b[n] = b[n].replace(\"turn on\", \"turn_on\")\n\nd = []\nc = []\nfor string in b:\n    c = string.split(\" \", 1)\n\n    g, h = c[1].split(\" through \")\n    i, j = g.split(\",\"), h.split(\",\")\n\n    x1, y1 = i\n    x2, y2 = j\n\n    d.append((c[0], int(x1), int(y1), int(x2), int(y2)))\n\nfor command, x1, y1, x2, y2 in d:\n    if command == \"turn_off\":\n        for x in range(x1, x2+1):\n            for y in range(y1, y2+1):\n                turn_off(x,y)\n    elif command == \"turn_on\":\n        for x in range(x1, x2+1):\n            for y in range(y1, y2+1):\n                turn_on(x,y)\n    elif command == \"toggle\":\n        for x in range(x1, x2+1):\n            for y in range(y1, y2+1):\n                toggle(x,y)\n    else:\n        print(\"Kein command\")\n        break\n\ncounter = 0\nfor column in lights:\n    counter += column.count(1)\nprint(counter)\n \n","sub_path":"day_6.py","file_name":"day_6.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"347055266","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n7.\tПо длинам трех отрезков, введенных пользователем, определить возможность\nсуществования треугольника, составленного из этих отрезков. 
Если такой\nтреугольник существует, то определить, является ли он\nразносторонним, равнобедренным или равносторонним.\n\"\"\"\n\n\ndef is_triangle(a, b, c):\n if a + b > c and a + c > b and b + c > a:\n return True\n return False\n\n\ndef is_equilateral_triangle(a, b, c):\n return a == b == c\n\n\ndef is_right_triangle(a, b, c):\n a_sq, b_sq, c_sq = a ** 2, b ** 2, c ** 2\n if (a_sq + b_sq == c_sq\n or a_sq + c_sq == b_sq\n or b_sq + c_sq == a_sq):\n return True\n return False\n\n\ndef is_isosceles_triangle(a, b, c):\n if a == b or a == c or b == c:\n return True\n return False\n\n\nif __name__ == '__main__':\n a = float(input(\"Введите длину первой стороны треугольника: \"))\n b = float(input(\"Введите длину второй стороны треугольника: \"))\n c = float(input(\"Введите длину третьей стороны треугольника: \"))\n\n if is_triangle(a, b, c):\n print(\"Такой треугольник существовать может\")\n else:\n print(\"Такой треугольник существовать не может\")\n quit()\n\n if is_equilateral_triangle(a, b, c):\n print(\"\\t- это равносторонний треугольник\")\n quit()\n\n print(f\"\\t- это равнобедренный треугольник: {is_isosceles_triangle(a, b, c)}\")\n print(f\"\\t- это прямоугольный треугольник: {is_right_triangle(a, b, c)}\")\n","sub_path":"Lesson_1/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"124495548","text":"from behave import when, given, then\nfrom selenium import webdriver\n\ndic = {'nome': 'input_nomePaciente',\n 'telefone': 'input_telefonePaciente',\n 'cpf': 'input_cpfPaciente',\n 'endereco': 'input_enderecoPaciente',\n 'senha1': 'input_senhaPaciente',\n 'senha2': 'input_senhaPaciente2',\n 'Enviar': 'input_botaoCadastrar'}\n\n@given('que o usuario esteja na pagina \"{page}\"')\ndef open_page(context, page):\n br = context.browser\n br.get(context.base_url + page)\n\n\n@when('inserir o \"{field}\" \"{value}\"')\ndef insert_values_on_fields(context, field, value):\n br = context.browser\n br.find_element_by_id(dic[field]).send_keys(value)\n\n\n@then('clicar no botão \"{btn}\"')\ndef click_bnt(context, btn):\n br = context.browser\n br.find_element_by_id(dic[btn]).click()\n\n\n@then('sou redirecionado ao login')\ndef step_impl(context):\n\tbr = context.browser\n\n\tassert br.current_url.endswith('/login/')\n\n\n@then('sou redirecionado ao menu')\ndef step_impl(context):\n\tbr = context.browser\n\n\tassert br.current_url.endswith('/menu/')\n\n@then('sou redirecionado ao cadastro')\ndef step_impl(context):\n\tbr = context.browser\n\tassert br.current_url.endswith('atendente/paciente_signup/')\n","sub_path":"Django/features/steps/generic_signup.py","file_name":"generic_signup.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"229804546","text":"#!/usr/bin/env python3\r\n\r\nclass number(object):\r\n\tnum=0\r\n\tpreNum=-1\r\n\tnextNum=-1\r\n\tsupposeFlag=False\r\n\tsupposeSum=0\r\n\tprimeFlag=True\r\n\tdef __init__(self,value):\r\n\t\tnum=value\r\n\t\tpreNum=-1\r\n\t\tnextNum=-1\r\n\t\tsupposeFlag=False\r\n\t\tsupposeSum=0\r\n\t\tprimeFlag=True\r\n\r\n\tdef setFalse(self):\r\n\t\tprimeFlag=False\r\n\tdef setPre(self,tmpNum):\r\n\t\tpreNum=tmpNum\r\n\tdef setNext(self,tmpNum):\r\n\t\tnextNum=tmpNum\r\n\tdef setSum(self,preSum):\r\n\t\tsupposeSum=preSum\r\n\t\tif(num-preNum==2):\r\n\t\t\tsupposeSum=supposeSum+1\r\n\r\n\tdef getNum(self):\r\n\t\treturn preNum\r\n\tdef 
getSum(self):\r\n\t\treturn supposeSum\r\n\tdef getPrimeFlag(self):\r\n\t\treturn primeFlag\r\n\r\n\r\nnum=[]\r\nn=100010\r\npreTmp=2;\r\nfor i in range(n):\r\n\tnum.append(number(i))\r\nfor i in range(2,100005):\r\n\tif(num[int(i)].getPrimeFlag()==False):\r\n\t\tcontinue\r\n\tfor j in range(2,n):\r\n\t\tif(int(i*j)>100000):\r\n\t\t\tbreak;\r\n\t\tnum[i*j].setFalse()\r\n#\t\tnum[int(i*j)].setFalse()\r\n\r\n\r\n","sub_path":"pat乙/1007.py","file_name":"1007.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"38345278","text":"from django.shortcuts import get_object_or_404, redirect, render\nfrom personas import forms\nfrom personas.models import Persona\nfrom .forms import RawPersonaForm, PersonaForm\n\ndef personasListView(request):\n\n querySet = Persona.objects.all()\n context = {\n 'objectList': querySet,\n }\n\n return render(request, 'personasLista.html', context)\n\n\ndef personasAnotherCretateView(request):\n form = RawPersonaForm()\n\n if request.method == \"POST\":\n form = RawPersonaForm(request.POST)\n if form.is_valid():\n print(form.cleaned_data)\n Persona.objects.create(**form.cleaned_data)\n else:\n print(form.errors)\n\n context = {\n \"form\": form,\n }\n\n return render(request, 'personasCreate.html', context)\n\ndef personaCreateView(request):\n\n initialValues = {\n 'nombre' : 'Sin nombre',\n }\n\n form = PersonaForm(request.POST or None, initial = initialValues)\n\n if form.is_valid():\n form.save()\n form = PersonaForm()\n\n context = {\n 'form': form,\n }\n\n return render(request, 'personasCreate.html', context)\n\n# RUTEO DINAMICO\ndef personasShowObject(request, myID):\n obj = get_object_or_404(Persona, id = myID)\n context = {\n 'objeto': obj,\n }\n return render(request, 'descripcion.html', context)\n\ndef personasDeleteView(request, myID):\n obj = get_object_or_404(Persona, id = myID)\n if request.method == \"POST\":\n print(\"Lo borro\")\n obj.delete()\n return redirect(\"../\")\n context = {\n 'objeto': obj,\n }\n\n return render(request, 'personasDelete.html', context)\n\ndef home(request):\n return render(request, 'base.html')","sub_path":"personas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"269574342","text":"class Greeter:\n def Greeter(self):\n print (\"Hello, World\")\n\ndef main():\n fred = Greeter()\n print(fred.Greeter())\n alma = Greeter()\n print(alma.Greeter())\n print (\"Fred is\", id(fred))\n\nif __name__ == '__main__':\n main()\n\n\n# Output:\n#\n# Hello, World\n# None\n# Hello, World\n# None\n# Fred is 2114027471240\n#\n","sub_path":"python/CACC_2019/week18_OOP5_Pub-Pri-Pro/Kelly/script4__main__.py","file_name":"script4__main__.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"42942738","text":"from Class_Padlers import Paddler\nfrom random import shuffle\nfrom random import sample\nfrom random import choice\nfrom random import randrange\nimport copy\nimport time\n#Convert all the csv into Paddler objects\nimport csv\n\nstart = time.time()\n\n\n# Shuffle pacers, engine and rockets independently\ndef generate_boat(paddlers):\n new_boat = copy.copy(paddlers)\n shuffle(new_boat[0])\n shuffle(new_boat[1])\n shuffle(new_boat[2])\n return new_boat\n\n# Create a the generation\ndef generationx(paddlers, n):\n\n gen = []\n for 
i in range(n):\n new_boat = copy.copy(paddlers)\n a = sample(new_boat[0], len(new_boat[0]))\n b = sample(new_boat[1], len(new_boat[1]))\n c = sample(new_boat[2], len(new_boat[2]))\n gen.append([a,b,c])\n return gen\n\n\n# Check weight - Compare both sides of the division PER\ndef check_weight(boat):\n fitness = 0\n weight_left = 0\n weight_rigth = 0\n for zone in boat:\n weights = check_weight_zone(zone)\n fitness += abs((weights[0]-weights[1]))\n weight_left += weights[0]\n weight_rigth += weights[1]\n\n weight_sides_dif = abs(weight_left - weight_rigth)\n fitness += weight_sides_dif\n return fitness\n\n\ndef check_weight_zone(zone):\n left = 0\n right = 0\n for i in range(len(zone)):\n paddler = zone[i]\n if i %2 == 0:\n left += paddler.weight\n else:\n right += paddler.weight\n\n return [left, right]\n\ndef check_weight_side_dif(boat):\n weight_left = 0\n weight_rigth = 0\n for zone in boat:\n weights = check_weight_zone(zone)\n weight_left += weights[0]\n weight_rigth += weights[1]\n\n weight_sides_dif = abs(weight_left - weight_rigth)\n return weight_sides_dif\n\n#Checks individual rows of the boat to get balance paddlers\ndef check_weight_one_row(boat):\n fitness = 0\n for zone in boat:\n for i in range(0, len(zone), 2):\n left = zone[i].weight\n rigth = zone[i+1].weight\n fitness+=(abs(left-rigth))\n return fitness\n\n\n\n\n\n\n# Check Preference Side\n\"\"\"bothsides_inc, multiplies the punishment if the paddler is not at the right side and can not do both sides\"\"\"\ndef check_preference(boat, punishment, bothsides_inc):\n fitness = 0\n for zone in boat:\n for i in range(len(zone)):\n preference = zone[i].preference\n both_sides = zone[i].bothsides\n # Left side\n if i%2 == 0:\n if preference != \"L\":\n fitness += punishment\n if both_sides != \"Y\":\n fitness += (punishment*bothsides_inc)\n # Rigth side\n else:\n if preference != \"R\":\n fitness += punishment\n if both_sides != \"Y\":\n fitness += (punishment*bothsides_inc)\n\n return fitness\n\n\ndef breed(boats):\n new_gen = []\n for i in range(0, len(boats), 2):\n # Get front-mid or back of the boat by 0,1,2\n boat1 = boats[i]\n boat2 = boats[i+1]\n breed1 = [boat2[0], boat1[1], boat1[2]]\n breed2 = [boat1[0], boat2[1], boat2[2]]\n breed3 = [boat1[0], boat2[1], boat1[2]]\n breed4 = [boat2[0], boat1[1], boat2[2]]\n breed5 = [boat1[0], boat1[1], boat2[2]]\n breed6 = [boat2[0], boat2[1], boat1[2]]\n breeds = [breed1, breed2, breed3, breed4, breed5, breed6]\n new_gen.extend(breeds)\n\n return new_gen\n\n\ndef mutation(boats):\n mutations = []\n for old_boat in boats:\n #first decide how many zones to mutate\n #new_boat = copy.copy(old_boat)\n new_boat = []\n for zone in old_boat:\n n_zone = []\n for person in zone:\n n_zone.append(copy.copy(person))\n new_boat.append(n_zone)\n for i in range(len(new_boat)):\n edit = choice([True, False])\n #if yes we exchange positions of 2 paddlers in that zone\n if edit:\n change_id_1 = randrange(0, len(new_boat[i]))\n change_id_2 = randrange(0, len(new_boat[i]))\n paddler_1 = copy.copy(old_boat[i][change_id_1])\n paddler_2 = copy.copy(old_boat[i][change_id_2])\n new_boat[i][change_id_1] = paddler_2\n new_boat[i][change_id_2] = paddler_1\n mutations.append(new_boat)\n\n return mutations\n\n\n\n\ndef genetic_algorithm():\n path = r\"csv_data/Spanish Dragons paddlers.csv\"\n paddlers = []\n with open(path, 'r') as csvFile:\n reader = csv.reader(csvFile)\n next(reader) # skip header\n for row in reader:\n paddler = Paddler(row[0], row[4], float(row[3]), row[5], row[6])\n 
paddlers.append(paddler)\n\n shuffle(paddlers)\n\n #Divide the boat: 3 rows(pacers), 4 rows(engine) and 3 rows(rockets) - Division PER\n pacers = []\n engine = []\n rocket = []\n for paddler in paddlers:\n if paddler.position == \"P\" and len(pacers) < 6:\n pacers.append(paddler)\n if paddler.position == \"E\" and len(engine) < 8:\n engine.append(paddler)\n if paddler.position == \"R\" and len(rocket) < 6:\n rocket.append(paddler)\n\n # One of the possible boats\n boat = [pacers, engine, rocket]\n\n\n\n\n n_people =16\n iterations = 200\n gen = generationx(boat, n_people)\n\n\n\n for i in range(iterations):\n sorted_boats = []\n for b in gen:\n fitness = check_weight(b) + check_preference(b, 5, 2) + check_weight_one_row(b)\n sorted_boats.append([b, fitness])\n #print (fitness)\n\n # Sort the boats by fitness and select half of the n_people as the best candidates\n sorted_boats = sorted(sorted_boats, key=lambda x: x[1])\n\n #for fitness in sorted_boats:\n # print(fitness[1])\n\n half_sel = int(n_people/2)\n sorted_boats = sorted_boats[:half_sel]\n # Get only the boats with no fitness\n sorted_boats = [x[0] for x in sorted_boats]\n\n\n candidates = sorted_boats\n\n\n new_generation = breed(candidates)\n #copy_gen = copy.copy(new_generation)\n mutations = mutation(new_generation)\n\n candidates.extend(new_generation)\n candidates.extend(mutations)\n\n #Add more random generation\n new_gen = generationx(boat, half_sel)\n candidates.extend(new_gen)\n\n gen = []\n gen = candidates\n\n #Best option\n Best_Option = sorted_boats[0]\n\n print(\"fitness weight is: \" + str(check_weight(Best_Option)))\n\n print(\"Boat Arrangement Optimized\")\n line_up = []\n for zone in Best_Option:\n\n for i in range(0,len(zone), 2):\n\n line = zone[i].name + \"_\" + str(zone[i].weight) + \" / \" + zone[i+1].name + \"_\" + str(zone[i+1].weight)\n\n line_up.append(line)\n\n print(\"weigth diff: \" + str(check_weight_side_dif(Best_Option)))\n\n print(time.time()-start)\n\n\n return line_up\n\n","sub_path":"Main_LineUp.py","file_name":"Main_LineUp.py","file_ext":"py","file_size_in_byte":6951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"246171537","text":"import os\nimport json\nimport random\nimport logging\nimport multiprocessing as mp\nfrom adeft.discover import load_adeft_miner\n\n\nmerged_path = '/content/adeft_mining_results/merged_results'\nlogger = logging.getLogger('collect')\nfile_handler = logging.FileHandler(os.path.join(merged_path,\n 'collect.log'))\nlog_format = logging.Formatter('%(asctime)s - %(name)s - '\n '%(levelname)s - %(message)s')\nfile_handler.setFormatter(log_format)\nlogger.addHandler(file_handler)\n\nmanager = mp.Manager()\nfilenames_map = manager.dict({})\n\n\ndef get_shortform(names):\n result = {}\n for name in names:\n try:\n with open(os.path.join(merged_path, 'miners', name)) as f:\n miner = load_adeft_miner(f)\n shortform = miner.shortform\n result[shortform] = name\n except Exception:\n logger.warning(f'unable to process file {name}')\n pass\n for key, value in result.items():\n filenames_map[key] = value\n\n\nfilenames = os.listdir(os.path.join(merged_path, 'miners'))\nrandom.shuffle(filenames)\nblocks = [filenames[i:i+6000] for i in range(0, len(filenames), 6000)]\n\nwith mp.Pool(64) as pool:\n pool.map(get_shortform, blocks)\n\n\nwith open(os.path.join(merged_path, 'filenames.json'), 'w') as f:\n json.dump(dict(filenames_map), 
f)\n","sub_path":"scripts/collect.py","file_name":"collect.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"259715286","text":"#%%\n# Create the node class\nclass Node:\n    def __init__(self, _value=None, _next=None):\n        self.value = _value\n        self.next = _next\n    def __str__(self):\n        return str(self.value)\n\n# Create the LinkedList class\n# Note: Only allow inputting a Node class - Just to keep consistency\n# Alternatively, the code can also be changed to accommodate inputs of pure values\nclass LinkedList:\n    def __init__(self, initial_value):\n        self.value = initial_value\n        self.linkedlist = [initial_value.value]\n        ## Complexity: O(1), because it always stores one value and is done\n    \n    def length(self):\n        return len(self.linkedlist)\n        ## Complexity: O(1), because len() on a list is constant time\n    \n    def addNode(self,new_value):\n        if isinstance(new_value.value, int):\n            self.linkedlist.append(new_value.value)\n            return self.linkedlist\n        else:\n            return \"Your node should contain an integer.\"\n        ## Complexity: O(1) amortized, because append() does constant work per call\n    \n    def addNodeAfter(self, new_value, after_node):\n        position_to_add = self.linkedlist.index(after_node.value)\n        self.linkedlist.insert(position_to_add+1, new_value.value)\n        return self.linkedlist\n        ## Complexity: O(n), because index() scans the list for the anchor node\n        ## and insert() shifts every element after the insertion point\n    \n    def addNodeBefore(self, new_value, after_node):\n        position_to_add = self.linkedlist.index(after_node.value)\n        self.linkedlist.insert(position_to_add, new_value.value)\n        return self.linkedlist\n        ## Complexity: O(n), for the same reasons as above\n    \n    def removeNode(self, node_to_remove):\n        try:\n            self.linkedlist.remove(node_to_remove.value)\n            return self.linkedlist\n        except ValueError:\n            return \"You do not have this node in your list.\"\n        ## Complexity: O(n), because remove() scans the list for the value\n        ## and shifts every element after it\n    \n    def removeNodesbyValue(self, value):\n        # Iterate over a copy: removing from the list while iterating over it\n        # would skip the element immediately after each removal\n        for elem in list(self.linkedlist):\n            if elem == value:\n                self.linkedlist.remove(value)\n        return self.linkedlist\n        ## Complexity: O(n^2) worst case, because the method loops over the\n        ## linkedlist and each remove() call is itself O(n)\n        ## I think this is the least complex way? 
Without looping over the list,\n ## I will not know whether there is any element that can be matched.\n \n def reverse(self):\n self.linkedlist.reverse()\n return self.linkedlist\n ## Complexity: O(1), Straightforward\n \n def __str__(self):\n return \"The linkedlist is %s\" % \" -> \".join([str(elem) for elem in self.linkedlist])\n ## Complexity: O(1), Straightforward\n \n\n\n\n#%%\nnode_1 = Node(_value=1)\nnode_2 = Node(_value=2)\nnode_3 = Node(_value=3)\nnode_4 = Node(_value=4)\nnode_5 = Node(_value=5)\nnode_6 = Node(_value=6)\nnode_6_1 = Node(_value=6)\nnode_7 = Node(_value=7)\n\n# Test\nfirst_list = LinkedList(node_1)\nfirst_list.addNode(node_2)\nfirst_list.addNode(node_3)\nfirst_list.addNodeAfter(node_4,node_1)\nfirst_list.addNodeBefore(node_5,node_2)\nfirst_list.addNodeBefore(node_6,node_4)\nfirst_list.addNodeBefore(node_6_1,node_5)\n\nfirst_list.length()\nfirst_list.reverse()\nfirst_list.removeNode(node_2)\nfirst_list.removeNodesbyValue(6)\nprint(first_list)\n","sub_path":"HW/HW5_Deng.py","file_name":"HW5_Deng.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"623180978","text":"# models.py\r\n\r\n#!/usr/bin/env ipython\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras import layers, models, optimizers, preprocessing, losses\r\n\r\nimport numpy as np\r\nimport os, io\r\nfrom pathlib import Path\r\nos.chdir(os.path.dirname(__file__))\r\n\r\nfrom options import opt\r\nfrom layers import *\r\n\r\n\r\nclass Optimizer_FP(optimizers.Adam, optimizers.Optimizer):\r\n\tdef __init__(self, models:list, optimizer_fn=None, clip_lim=None, **kwargs):\r\n\t\t\"\"\"Special optimizer for F and P simultaneously\r\nparameters:\r\n\t\tmodels : list = [F, P, Q]\r\n\t\t\"\"\"\r\n\t\tsuper(Optimizer_FP, self).__init__(**kwargs)\r\n\t\t#self._set_hyper('clipvalue', kwargs.get('clipvalue', clipvalue))\r\n\t\t#clipvalue_t = array_ops.identity(self._get_hyper('clipvalue', var_dtype))\r\n\t\tself.optim = optimizers.Adam(learning_rate=opt.learning_rate) if not optimizer_fn else optimizer_fn\r\n\t\tself.F, self.P, self.Q = models\r\n\t\tself.training = True\r\n\t\tself.clip_lim = clip_lim\r\n\t\tself.metrices = {'loss' : None, 'src_acc' : None, 'tgt_acc' : None, 'avg_loss' : None}\r\n\r\n\tdef call(self, inputs_src, inputs_tgt, labels_src, labels_tgt=None, _lambda=opt._lambda, supervised=False):\r\n\t\t#self._set_hyper('_lambda', _lambda)\r\n\t\tself._lambda = _lambda\r\n\t\tself.supervised = supervised\r\n\t\tself.train_step(inputs_src, inputs_tgt, labels_src, labels_tgt=None, compute_metrices=True)\r\n\t\treturn self.metrices\r\n\t\t\r\n\tdef compute_metrices(self, inputs_src, inputs_tgt, labels_src, labels_tgt=None):\r\n\t\tfeatures_src = self.F(inputs_src)\r\n\t\tfeatures_tgt = self.F(inputs_tgt)\r\n\r\n\t\to_src_sent, l_src_sent = self.P(features_src, labels_src, compute_metrices=True)\r\n\t\to_tgt_sent, l_tgt_sent = self.P(features_tgt, labels_tgt, compute_metrices=True) if self.supervised else (None, 0)\r\n\r\n\t\t_, l_src_ad = self.Q(features_src, compute_metrices=True)\r\n\t\t_, l_tgt_ad = self.Q(features_tgt, compute_metrices=True)\r\n\r\n\t\tself.metrices['loss'] = l_src_sent + l_tgt_sent + self._lambda * (l_src_ad - l_tgt_ad)\r\n\r\n\t\tpredictions = argmax32(o_src_sent)\r\n\t\tself.total = len(labels_src)\r\n\t\tself.correct = np.sum(predictions == labels_src)\r\n\t\tself.metrices['src_acc'] = self.correct / self.total\r\n\r\n\t\tif 
self.supervised:\r\n\t\t\tpredictions = argmax32(o_tgt_sent)\r\n\t\t\tself.total += len(labels_tgt)\r\n\t\t\tself.correct += np.sum(predictions == labels_tgt, output_type=tf.int8)\r\n\t\t\tself.metrices['tgt_acc'] = self.correct / self.total\r\n\r\n\t\tself.metrices['avg_loss'] = tf.nn.compute_average_loss(self.metrices['loss'], global_batch_size=len(labels_src) + (len(labels_tgt) if labels_tgt else 0))\r\n\t\treturn self.metrices\r\n\t\r\n\t\r\n\tdef train_step(self, inputs_src, inputs_tgt, labels_src, labels_tgt=None, compute_metrices=False):\r\n\t\twith tf.GradientTape() as tape:\r\n\t\t\tloss = self.compute_metrices(inputs_src, inputs_tgt, labels_src, labels_tgt)['loss']\r\n\t\tgradients = tape.gradient(loss, list(self.P.net.trainable_variables) + list(self.F.fcnet.trainable_variables))\r\n\t\tif opt.clip_lim_FP: gradients = [(tf.clip_by_value(grad, self.clip_lim_FP[0], self.clip_lim_FP[1])) for grad in gradients]\r\n\t\tif self.training: self.apply_gradients(zip(gradients, list(self.P.net.trainable_variables) + list(self.F.fcnet.trainable_variables)))\t# experimental_aggregate_gradients\r\n\t\tif compute_metrices: self.compute_metrices(inputs_src, inputs_tgt, labels_src, labels_tgt)\r\n\t\treturn self.metrices\r\n\t\r\n\tdef get_config(self):\r\n\t\tconfig = super(Optimizer_FP, self).get_config()\r\n\t\t#config.update({\r\n\t\t#\t\t'_lambda': self._serialize_hyperparameter('_lambda'),\r\n\t\t#\t\t'clipvalue': self._serialize_hyperparameter('clipvalue'),\r\n\t\t#\t})\r\n\t\treturn config\r\n\t\t#config = {\"name\": self._name}\r\n\t\tif self.clipnorm is not None: config[\"clipnorm\"] = self.clipnorm\r\n\t\tif self.clipvalue is not None: config[\"clipvalue\"] = self.clipvalue\r\n\t\treturn config\r\n\r\n\tdef freeze(self):\r\n\t\tself.training = False\r\n\r\n\tdef unfreeze(self):\r\n\t\tself.training = True\r\n\r\n\t@tf.function\r\n\tdef distributed_train_step(dist_inputs):\r\n\t\tper_replica_losses = mirrored_strategy.run(train_step, args=(dist_inputs,))\r\n\t\treturn mirrored_strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)\r\n\r\n\r\nclass DAN_Feature_Extractor(keras.Model):\r\n\tdef __init__(self, vocab, num_layers, hidden_size, pooling='avg', dropout=0, batch_norm=False, activation=opt.linear_activation):\r\n\t\tsuper(DAN_Feature_Extractor, self).__init__()\r\n\t\tassert num_layers >= 0, 'Invalid layer numbers'\r\n\t\tself.trainable=True\r\n\r\n\t\tself.emb_layer = vocab.init_embed_layer()\r\n\t\tif(pooling == 'sum' or pooling == 'add'): self.pool = Summing(self.emb_layer)\r\n\t\telse: self.pool = Averaging(self.emb_layer)\r\n\r\n\t\tself.fcnet = keras.Sequential()\r\n\t\tself.fcnet.add(self.pool)\r\n\t\tfor i in range(num_layers):\r\n\t\t\tif dropout > 0: self.fcnet.add(layers.Dropout(rate=dropout))\r\n\r\n\t\t\tif i == 0: self.fcnet.add(layers.Dense(units=hidden_size, input_shape=(vocab.emb_size,), activation=activation))\r\n\t\t\telse: self.fcnet.add(layers.Dense(units=hidden_size, input_shape=(hidden_size,), activation=activation))\r\n\r\n\t\t\tif batch_norm: self.fcnet.add(layers.BatchNormalization(input_shape=(hidden_size,)))\t# same shape as input\t# use training=False when making inference from model (model.predict, model.evaluate?)\r\n\r\n\t\t\tself.fcnet.add(layers.LeakyReLU(alpha=0.3))\r\n\t\t\t#self.fcnet.add(layers.ReLU())\r\n\t\r\n\tdef call(self, input):\r\n\t\treturn self.fcnet(input)\t#(self.pool(input))\r\n\r\n\tdef freeze(self):\r\n\t\tself.trainable = False\r\n\t\tself.fcnet.trainable = False\r\n\r\n\tdef 
unfreeze(self):\r\n\t\tself.trainable = True\r\n\t\tself.fcnet.trainable = True\r\n\r\n\tdef freeze_emb_layer(self):\r\n\t\tself.emb_layer.trainable = False\r\n\r\n\tdef unfreeze_emb_layer(self):\r\n\t\tself.emb_layer.trainable = True\r\n\r\n\r\nclass LSTM_Feature_Extractor(keras.Model):\r\n\tdef __init__(self, vocab, num_layers, hidden_size, dropout=0, bidir_rnn=True, attn_type='dot'):\r\n\t\tsuper(LSTM_Feature_Extractor, self).__init__()\r\n\r\n\t\tself.num_layers = num_layers\r\n\t\tself.bidir_rnn = bidir_rnn\r\n\t\tself.attn_type = attn_type\r\n\t\tself.hidden_size = hidden_size//2 if bdrnn else hidden_size\r\n\t\tself.n_cells = self.num_layers*2 if bdrnn else self.num_layers\r\n\r\n\t\tself.emb_layer = vocab.init_embed_layer()\r\n\r\n\t\tif bidir_rnn: self.rnn = layers.Bidirectional(layers.LSTM(units=self.hidden_size, num_layers=num_layers, dropout=dropout, input_shape=(vocab.emb_size,)))\r\n\t\telse: self.rnn = layers.LSTM(units=self.hidden_size, num_layers=num_layers, dropout=dropout, input_shape=(vocab.emb_size,))\r\n\r\n\t\tif attn_type == 'dot': self.attn = layers.Attention()\r\n\t\telif attn_type == 'add': self.attn = layers.AdditiveAttention()\r\n\r\n\tdef call(self, inputs):\r\n\t\tdata, lengths = inputs\r\n\t\tlengths_list = lengths.tolist()\r\n\t\tbatch_size = len(data)\r\n\t\temb_layer = self.emb_layer(data)\r\n\t\tpacked = pack_padded_sequence(emb_layer, lengths_list, batch_first=True)\r\n\t\tstate_shape = self.n_cells, batch_size, self.hidden_size\r\n\t\th0 = c0 = embeds.data.new(*state_shape)\r\n\t\toutput, (ht, ct) = self.rnn(packed, (h0, c0))\r\n\r\n\t\tif self.attn_type == 'last':\r\n\t\t\treturn ht[-1] if not self.bdrnn \\\r\n\t\t\t\t\t\telse ht[-2:].transpose(0, 1).contiguous().view(batch_size, -1)\r\n\t\telif self.attn_type == 'avg':\r\n\t\t\tunpacked_output = pad_packed_sequence(output, batch_first=True)[0]\r\n\t\t\treturn torch.sum(unpacked_output, 1) / lengths.float().view(-1, 1)\r\n\t\telif self.attn_type == 'dot':\r\n\t\t\tunpacked_output = pad_packed_sequence(output, batch_first=True)[0]\r\n\t\t\treturn self.attn((unpacked_output, lengths))\r\n\t\telse:\r\n\t\t\traise Exception('Please specify valid attention (pooling) mechanism')\r\n\r\n\t#def freeze(self):\r\n\t#\tself.trainable = False\r\n\t#\tself.fcnet.trainable = False\r\n\r\n\t#def unfreeze(self):\r\n\t#\tself.trainable = True\r\n\t#\tself.fcnet.trainable = True\r\n\r\n\r\nclass CNN_Feature_Extractor(keras.Model):\r\n\tdef __init__(self, vocab, num_layers, hidden_size, kernel_num, kernel_sizes, dropout=0):\r\n\t\tsuper(CNN_Feature_Extractor, self).__init__()\r\n\t\tself.emb_layer = vocab.init_embed_layer()\r\n\t\tself.kernel_num = kernel_num\r\n\t\tself.kernel_sizes = kernel_sizes\r\n\r\n\t\tself.convs = nn.ModuleList([nn.Conv2d(1, kernel_num, (K, vocab.emb_size)) for K in kernel_sizes])\r\n\t\t\r\n\t\tassert num_layers >= 0, 'Invalid layer numbers'\r\n\t\tself.fcnet = nn.Sequential()\r\n\t\tfor i in range(num_layers):\r\n\t\t\tif dropout > 0:\r\n\t\t\t\tself.fcnet.add_module('f-dropout-{}'.format(i), nn.Dropout(p=dropout))\r\n\t\t\tif i == 0:\r\n\t\t\t\tself.fcnet.add_module('f-linear-{}'.format(i),\r\n\t\t\t\t\t\tnn.Linear(len(kernel_sizes)*kernel_num, hidden_size))\r\n\t\t\telse:\r\n\t\t\t\tself.fcnet.add_module('f-linear-{}'.format(i), nn.Linear(hidden_size, hidden_size))\r\n\t\t\tself.fcnet.add_module('f-relu-{}'.format(i), nn.ReLU())\r\n\r\n\tdef call(self, inputs):\r\n\t\tdata, lengths = inputs\r\n\t\tbatch_size = len(data)\r\n\t\tembeddings = self.emb_layer(data)\r\n\t\t# conv\r\n\t\tembeddings 
= tf.expand_dims(embeddings, axis=1) # batch_size, 1, seq_len, emb_size\r\n\t\tx = [functional.relu(conv(embeds)).squeeze(3) for conv in self.convs]\r\n\t\tx = [functional.max_pool1d(i, i.size(2)).squeeze(2) for i in x]\r\n\t\tx = torch.cat(x, 1)\r\n\t\t# fcnet\r\n\t\treturn self.fcnet(x)\r\n\r\n\tdef freeze(self):\r\n\t\tself.trainable = False\r\n\t\tself.fcnet.trainable = False\r\n\r\n\tdef unfreeze(self):\r\n\t\tself.trainable = True\r\n\t\tself.fcnet.trainable = True\r\n\r\n\r\nclass Sentiment_Classifier(keras.Model):\r\n\tdef __init__(self, num_layers, hidden_size, output_size, dropout=0, batch_norm=False, loss_fn=None, **kwargs):\r\n\t\tsuper(Sentiment_Classifier, self).__init__(**kwargs)\r\n\t\tassert num_layers >= 0, 'Invalid layer numbers'\r\n\t\tself.trainable=True\r\n\t\tself.net = models.Sequential()\r\n\t\tfor _ in range(num_layers):\r\n\t\t\tif dropout > 0: self.net.add(layers.Dropout(rate=dropout))\r\n\t\t\tself.net.add(layers.Dense(units=hidden_size, input_shape=(hidden_size,), activation=opt.linear_activation))\r\n\t\t\tif batch_norm: self.net.add(layers.BatchNormalization())\r\n\t\t\tself.net.add(layers.ReLU())\r\n\t\tself.net.add(layers.Dense(units=output_size, input_shape=(hidden_size,), activation=opt.linear_activation))\r\n\t\tself.net.add(layers.Softmax(axis=-1))\r\n\t\t#self.net.add(LogSoftmax(axis=-1))\r\n\t\tself.loss_fn = losses.SparseCategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) if not loss_fn else loss_fn\r\n\t\tself.metrices = {'loss' : None, 'src_acc' : None, 'tgt_acc' : None, 'avg_loss' : None}\r\n\t\r\n\tdef call(self, input, labels, compute_metrices=True):\r\n\t\tif not compute_metrices: return self.net(input)\r\n\t\toutputs_sent = self.net(input)\r\n\t\tloss_sent = self.loss_fn(labels, outputs_sent)\r\n\t\treturn outputs_sent, loss_sent\r\n\r\n\tdef freeze(self):\r\n\t\tself.trainable = False\r\n\t\tself.net.trainable = False\r\n\r\n\tdef unfreeze(self):\r\n\t\tself.trainable = True\r\n\t\tself.net.trainable = True\r\n\r\n\r\n\r\nclass Language_Detector(keras.Model):\r\n\tdef __init__(self, num_layers, hidden_size, dropout=0, batch_norm=False, activation=opt.linear_activation, **kwargs):\r\n\t\tsuper(Language_Detector, self).__init__(**kwargs)\r\n\t\tassert num_layers >= 0, 'Invalid layer numbers'\r\n\t\tself.trainable = True\r\n\t\tself.net = keras.Sequential()\r\n\t\t#self.net.add(layers.InputLayer(input_shape=(900,)))\r\n\t\tfor i in range(num_layers):\r\n\t\t\tif dropout > 0: self.net.add(layers.Dropout(rate=dropout))\r\n\t\t\tself.net.add(layers.Dense(units=hidden_size, input_shape=(hidden_size,), activation=activation))\r\n\t\t\tif batch_norm: self.net.add(layers.BatchNormalization(input_shape=(hidden_size,)))\r\n\t\tself.net.add(layers.Dense(units=hidden_size, input_shape=(hidden_size,), activation=activation))\r\n\t\tself.net.add(layers.Dense(units=1, input_shape=(hidden_size,), activation=activation))\r\n\t\tself.metrices = {'loss' : None}\r\n\r\n\tdef call(self, input, compute_metrices=False):\r\n\t\tif not compute_metrices: return self.net(input)\r\n\t\toutput_ad = self.net(input)\r\n\t\tloss_ad = self.loss_fn(output_ad)\r\n\t\tself.metrices['loss'] = loss_ad\r\n\t\treturn output_ad, loss_ad\r\n\r\n\tdef compile(self, optimizer=None, loss_fn=tf.reduce_mean):\r\n\t\tsuper(Language_Detector, self).compile()\r\n\t\tself.optimizer = optimizers.Adam(learning_rate=opt.Q_learning_rate) if not optimizer else optimizer\r\n\t\tself.loss_fn = loss_fn\r\n\t\t#self.net.compile(optimizer=self.optimizer, 
loss=loss_fn)\r\n\r\n\tdef train_step(self, features, name='src', _lambda=1.0):\r\n\t\tsgn = -1 if name == 'src' else 1\r\n\t\twith tf.GradientTape() as tape:\r\n\t\t\toutput_ad = _lambda * self(features, training=True)\r\n\t\t\tloss_ad = sgn * self.loss_fn(output_ad, axis=-1, name='loss_ad')\r\n\t\t#log.info(loss_ad)\r\n\t\ttrainable_variables = self.net.trainable_variables\r\n\t\tgrads = tape.gradient(loss_ad, trainable_variables)\r\n\t\tif self.trainable: self.optimizer.apply_gradients(zip(grads, self.net.trainable_weights))\r\n\t\tself.metrices['loss'] = loss_ad\r\n\t\treturn self.metrices\r\n\r\n\tdef clip_weights(self):\r\n\t\tpass\r\n\r\n\tdef freeze(self):\r\n\t\tself.trainable = False\r\n\t\tself.net.trainable = False\r\n\r\n\tdef unfreeze(self):\r\n\t\tself.trainable = True\r\n\t\tself.net.trainable = True\r\n\r\n\r\n\r\n\"\"\"\r\noptimizer = tf.train.AdamOptimizer(learning_rate=0.001)\r\ngrads_and_vars = optimizer.compute_gradients(loss_final)\r\ngrads, _ = list(zip(*grads_and_vars))\r\nnorms = tf.global_norm(grads)\r\ngradnorm_s = tf.summary.scalar('gradient norm', norms)\r\ntrain_op = optimizer.apply_gradients(grads_and_vars, name='train_op')\r\n\"\"\"\r\n","sub_path":"code/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":12959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"324891458","text":"##\n# See the file COPYRIGHT for copyright information.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##\n\n\"\"\"\nIncident Management System web service command line tool.\n\"\"\"\n\n__all__ = [\n \"WebTool\",\n]\n\nfrom twisted.python.filepath import FilePath\nfrom twisted.logger import Logger\nfrom twisted.web.server import Site, Session\nfrom twext.python.usage import (\n Executable, Options as BaseOptions, exit, ExitStatus\n)\n\nfrom ..data.model import Event\nfrom ..store.istore import StorageError\nfrom ..store.sqlite import Storage\nfrom .log import patchCombinedLogFormatter\nfrom .config import Configuration\nfrom .service import WebService\n\n\n\nclass IMSSession(Session):\n sessionTimeout = 60 * 60 * 1 # 1 hour\n\n\n\nclass ConfigOptionsMixIn(object):\n \"\"\"\n Mixin for L{Options} which adds options for reading an IMS config file.\n \"\"\"\n\n def opt_config(self, path):\n \"\"\"\n Location of configuration file.\n \"\"\"\n self[\"configFile\"] = FilePath(path)\n\n\n def initConfig(self):\n try:\n configFile = self.get(\"configFile\")\n\n if configFile is None:\n if FilePath(\"./.develop\").isdir():\n dev = FilePath(\"./conf/imsd.conf\")\n if dev.isfile():\n configFile = dev\n\n if configFile is None:\n configuration = Configuration(None)\n else:\n if not configFile.isfile():\n exit(ExitStatus.EX_CONFIG, \"Config file not found.\")\n configuration = Configuration(configFile)\n\n if \"logFile\" not in self:\n self.opt_log_file(configuration.LogFile)\n if \"logFormat\" not in self:\n self.opt_log_format(configuration.LogFormat)\n if \"logLevel\" not in self:\n 
self.opt_log_level(configuration.LogLevel)\n if \"pidFile\" not in self:\n self.opt_pid_file(configuration.PIDFile)\n\n self[\"configuration\"] = configuration\n except Exception as e:\n exit(ExitStatus.EX_CONFIG, unicode(e))\n\n\n\nclass WebTool(Executable):\n \"\"\"\n Incident Management System web service command line tool.\n \"\"\"\n\n log = Logger()\n\n\n class Options(BaseOptions, ConfigOptionsMixIn):\n optFlags = []\n\n optParameters = [\n [\"port\", \"p\", 8080, \"Port to listen on.\"],\n ]\n\n\n def postOptions(self):\n Executable.postOptions(self)\n\n patchCombinedLogFormatter()\n\n self.options.initConfig()\n\n\n def whenRunning(self):\n config = self.options[\"configuration\"]\n config.directory.loadRecords()\n service = WebService(config)\n\n host = self.options.get(\"host\", \"localhost\")\n port = int(self.options[\"port\"])\n\n self.log.info(\n \"Setting up web service at http://{host}:{port}/\",\n host=host, port=port,\n )\n\n factory = Site(service.resource())\n factory.sessionFactory = IMSSession\n\n from twisted.internet import reactor\n reactor.listenTCP(port, factory, interface=host)\n\n\n\nclass KleinTool(Executable):\n \"\"\"\n Incident Management System web service command line tool.\n \"\"\"\n\n log = Logger()\n\n\n class Options(BaseOptions, ConfigOptionsMixIn):\n optFlags = []\n\n optParameters = []\n\n\n def postOptions(self):\n Executable.postOptions(self)\n\n self.options.initConfig()\n\n config = self.options[\"configuration\"]\n service = WebService(config)\n\n for rule in service.app.url_map.iter_rules():\n methods = list(rule.methods)\n print(\n \"{rule.rule} {methods} -> {rule.endpoint}\"\n .format(rule=rule, methods=methods)\n )\n\n exit(ExitStatus.EX_OK)\n\n\n\nclass LegacyLoadTool(Executable):\n \"\"\"\n Incident Management System tool for loading data from a legacy file store\n into to a database store.\n \"\"\"\n\n log = Logger()\n\n class Options(BaseOptions, ConfigOptionsMixIn):\n optFlags = []\n\n optParameters = []\n\n\n def __init__(self):\n BaseOptions.__init__(self)\n self.opt_log_file(\"-\")\n\n\n def getSynopsis(self):\n return \"{} datadir [datadir ...]\".format(\n BaseOptions.getSynopsis(self)\n )\n\n\n def parseArgs(self, *datadirs):\n BaseOptions.parseArgs(self)\n self[\"fileStores\"] = [FilePath(d) for d in datadirs]\n\n\n def postOptions(self):\n Executable.postOptions(self)\n\n self.options.initConfig()\n\n\n def whenRunning(self):\n try:\n config = self.options[\"configuration\"]\n\n storage = Storage(config.DatabaseFile)\n\n for storeFilePath in self.options[\"fileStores\"]:\n try:\n storage.loadFromFileStore(storeFilePath)\n except StorageError as e:\n self.log.critical(\n \"{error}\", store=storeFilePath, error=e\n )\n break\n\n finally:\n from twisted.internet import reactor\n reactor.stop()\n\n\nclass JSONLoadTool(Executable):\n \"\"\"\n Incident Management System tool for loading data from a JSON file into to a\n database store.\n \"\"\"\n\n log = Logger()\n\n class Options(BaseOptions, ConfigOptionsMixIn):\n optFlags = []\n\n optParameters = []\n\n\n def __init__(self):\n BaseOptions.__init__(self)\n self.opt_log_file(\"-\")\n self[\"trialRun\"] = False\n\n\n def getSynopsis(self):\n return \"{} event file\".format(\n BaseOptions.getSynopsis(self)\n )\n\n\n def opt_trial(self):\n self[\"trialRun\"] = True\n\n opt_t = opt_trial\n\n\n def parseArgs(self, eventID, fileName):\n BaseOptions.parseArgs(self)\n\n self[\"event\"] = Event(eventID)\n self[\"filePath\"] = FilePath(fileName)\n\n\n def postOptions(self):\n 
Executable.postOptions(self)\n\n self.options.initConfig()\n\n\n def whenRunning(self):\n try:\n config = self.options[\"configuration\"]\n event = self.options[\"event\"]\n filePath = self.options[\"filePath\"]\n trialRun = self.options[\"trialRun\"]\n\n storage = Storage(config.DatabaseFile)\n\n try:\n storage.loadFromEventJSON(event, filePath, trialRun=trialRun)\n except StorageError as e:\n self.log.critical(\n \"{error}\", event=event, file=filePath, error=e\n )\n\n finally:\n from twisted.internet import reactor\n reactor.stop()\n","sub_path":"ims/service/tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":7209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"107065436","text":"###############################################################################\n# Copyright Kitware Inc.\n#\n# Licensed under the Apache License, Version 2.0 ( the \"License\" );\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n###############################################################################\n\nimport os\nimport sys\n\n# Worker-defined inputs\noriginalFile = globals()['originalFile']\nsegmentation_helpersPath = globals()['segmentation_helpersPath']\n\nsegmentation_helpersDirPath = os.path.dirname(segmentation_helpersPath)\nif segmentation_helpersDirPath not in sys.path:\n sys.path.append(segmentation_helpersDirPath)\nfrom segmentation_helpers.scikit import ScikitSegmentationHelper # noqa E402\n\n\nwith open(originalFile, 'rb') as originalFileStream:\n # Scikit-Image is ~70ms faster at decoding image data\n originalImageData = ScikitSegmentationHelper.loadImage(originalFileStream)\n\nsuperpixelsData = ScikitSegmentationHelper.superpixels(originalImageData)\nsuperpixelsEncodedStream = ScikitSegmentationHelper.writeImage(\n superpixelsData, 'png')\n\nsuperpixelsEncodedBytes = superpixelsEncodedStream.getvalue()\n","sub_path":"server/models/_generate_superpixels.py","file_name":"_generate_superpixels.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"572989227","text":"\"\"\"Support for Genius Hub binary_sensor devices.\"\"\"\nfrom datetime import datetime\nimport logging\n\nfrom homeassistant.components.binary_sensor import BinarySensorDevice\nfrom homeassistant.core import callback\nfrom homeassistant.helpers.dispatcher import async_dispatcher_connect\n\nfrom . 
import DOMAIN\n\n_LOGGER = logging.getLogger(__name__)\n\nGH_IS_SWITCH = ['Dual Channel Receiver', 'Electric Switch', 'Smart Plug']\n\n\nasync def async_setup_platform(hass, config, async_add_entities,\n discovery_info=None):\n \"\"\"Set up the Genius Hub sensor entities.\"\"\"\n client = hass.data[DOMAIN]['client']\n\n devices = [d for d in client.hub.device_objs if d.type is not None]\n switches = [GeniusBinarySensor(client, d)\n for d in devices if d.type[:21] in GH_IS_SWITCH]\n\n async_add_entities(switches)\n\n\nclass GeniusBinarySensor(BinarySensorDevice):\n \"\"\"Representation of a Genius Hub binary_sensor.\"\"\"\n\n def __init__(self, client, device):\n \"\"\"Initialize the binary sensor.\"\"\"\n self._client = client\n self._device = device\n\n if device.type[:21] == 'Dual Channel Receiver':\n self._name = 'Dual Channel Receiver {}'.format(device.id)\n else:\n self._name = '{} {}'.format(device.type, device.id)\n\n async def async_added_to_hass(self):\n \"\"\"Set up a listener when this entity is added to HA.\"\"\"\n async_dispatcher_connect(self.hass, DOMAIN, self._refresh)\n\n @callback\n def _refresh(self):\n self.async_schedule_update_ha_state(force_refresh=True)\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return self._name\n\n @property\n def should_poll(self) -> bool:\n \"\"\"Return False as the geniushub devices should not be polled.\"\"\"\n return False\n\n @property\n def is_on(self):\n \"\"\"Return the status of the sensor.\"\"\"\n return self._device.state['outputOnOff']\n\n @property\n def device_state_attributes(self):\n \"\"\"Return the device state attributes.\"\"\"\n attrs = {}\n attrs['assigned_zone'] = self._device.assignedZones[0]['name']\n\n last_comms = self._device._info_raw['childValues']['lastComms']['val'] # noqa; pylint: disable=protected-access\n if last_comms != 0:\n attrs['last_comms'] = datetime.utcfromtimestamp(\n last_comms).isoformat()\n\n return {**attrs}\n","sub_path":"homeassistant/components/geniushub/binary_sensor.py","file_name":"binary_sensor.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"503329221","text":"\"\"\"sqleco client.\"\"\"\nfrom socket import socket, AF_INET, SOCK_STREAM\nimport sys\n\n\ndef client(command, host=\"localhost\", port=2018):\n sock = socket(AF_INET, SOCK_STREAM)\n sock.connect((host, port))\n sock.send(command.encode())\n reply = sock.recv(1024)\n sock.close()\n print(reply)\n\n\nif __name__ == '__main__':\n client(command=\" \".join(sys.argv[1:]))\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"309426036","text":"from __future__ import absolute_import, division, print_function, unicode_literals\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # ignore init warnings\nimport tensorflow as tf\nimport numpy as np\nimport tensorflow_datasets as tfds\ntfds.disable_progress_bar()\nimport math\nimport matplotlib.pyplot as plt\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '0' # warnings are good now\n\ndataset, metadata = tfds.load('fashion_mnist', as_supervised=True, with_info=True)\ntrain_ds, test_ds = dataset['train'], dataset['test']\n\nclass_names = ['T-Shirt', 'Pants', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Boot']\n\nnum_train_ex = metadata.splits['train'].num_examples\nnum_test_ex = 
metadata.splits['test'].num_examples\n\nprint(\"Number of training examples: {}\\nNumber of test examples: {}\".format(num_train_ex, num_test_ex))\n\ndef normalize(images, labels):\n images = tf.cast(images, tf.float32)\n images /= 255\n return images, labels\n\ntrain_ds = train_ds.map(normalize)\ntest_ds = test_ds.map(normalize)\n\ntrain_ds = train_ds.cache()\ntest_ds = test_ds.cache()\n\nprint(\"Building model...\")\n\nmodel = tf.keras.Sequential([\n tf.keras.layers.Flatten(input_shape=(28, 28, 1)),\n tf.keras.layers.Dense(128, activation=tf.nn.relu),\n tf.keras.layers.Dense(10)\n])\n\nprint(\"Compiling model...\")\n\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy']\n)\n\nprint(\"Training model...\")\n\nBATCH_SIZE = 32\ntrain_ds = train_ds.cache().repeat().shuffle(num_train_ex).batch(BATCH_SIZE)\ntest_ds = test_ds.cache().batch(BATCH_SIZE)\n\nmodel.fit(train_ds, epochs=5, steps_per_epoch=math.ceil(num_train_ex/BATCH_SIZE))\n\ntest_loss, test_accuracy = model.evaluate(test_ds, steps=math.ceil(num_test_ex/32))\nprint('Accuracy on test dataset:', test_accuracy)\n","sub_path":"Python/tensorflow/fashion.py","file_name":"fashion.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"332491451","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport datetime\nfrom urlparse import urljoin\n\nfrom django.db.models import Q\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.shortcuts import get_object_or_404\nfrom django.contrib import messages\nfrom django.http import HttpResponseRedirect\nfrom django.http import HttpResponseForbidden\nfrom django.http import HttpResponse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.safestring import mark_safe\nfrom django.utils import simplejson\nfrom django.utils.html import escape\nfrom django.conf import settings\nfrom lck.django.common import nested_commit_on_success\nfrom lck.django.filters import slugify\nfrom bob.menu import MenuItem, MenuHeader\n\nfrom ralph.cmdb.forms import (\n CISearchForm, CIEditForm, CIViewForm, CIRelationEditForm, SearchImpactForm\n)\nfrom ralph.cmdb.customfields import EditAttributeFormFactory\nfrom ralph.cmdb.models_ci import (\n CIOwner, CIOwnership, CILayer, CI_TYPES, CI, CIRelation, CI_LAYER\n)\nimport ralph.cmdb.models as db\nfrom ralph.cmdb.graphs import search_tree, ImpactCalculator\nfrom ralph.account.models import Perm\nfrom ralph.ui.views.common import Base, _get_details\nfrom ralph.util.presentation import (\n get_device_icon, get_venture_icon, get_network_icon\n)\n\n\nROWS_PER_PAGE = 20\nSAVE_PRIORITY = 200\n\n\ndef get_icon_for(ci):\n if not ci or not ci.content_object:\n return\n if ci.content_type.name == 'venture':\n return get_venture_icon(ci.content_object)\n elif ci.content_type.name == 'device':\n return get_device_icon(ci.content_object)\n elif ci.content_type.name == 'network':\n return get_network_icon(ci.content_object)\n else:\n return 'wall'\n\n\nclass BaseCMDBView(Base):\n template_name = 'nope.html'\n Form = CIRelationEditForm\n\n def generate_breadcrumb(self):\n parent = self.request.GET.get('parent', '')\n if not parent:\n return []\n list = []\n counter = 0\n while parent and counter < 100:\n ci = 
db.CI.objects.filter(id=parent).all()[0]\n list.insert(0, ci)\n try:\n parent = db.CI.objects.filter(parent__child=parent).all()[0].id\n except:\n parent = None\n if parent == ci.id:\n parent = None\n counter += 1\n return list\n\n def get_permissions_dict(self):\n has_perm = self.request.user.get_profile().has_perm\n ci_perms = [\n 'create_configuration_item',\n 'edit_configuration_item_info_generic',\n 'edit_configuration_item_relations',\n 'read_configuration_item_info_generic',\n 'read_configuration_item_info_puppet',\n 'read_configuration_item_info_git',\n 'read_configuration_item_info_jira',\n ]\n ret = {}\n for perm in ci_perms:\n ret.update({perm + '_perm': has_perm(getattr(Perm, perm))})\n return ret\n\n def get_sidebar_items(self):\n ci = (\n ('/cmdb/add', 'Add CI', 'fugue-block--plus'),\n ('/cmdb/changes/dashboard', 'Dashboard', 'fugue-dashboard'),\n ('/cmdb/graphs', 'Impact report', 'fugue-dashboard'),\n ('/cmdb/graphs_tree', 'Tree deps.', 'fugue-dashboard'),\n ('/cmdb/changes/dashboard', 'Dashboard', 'fugue-dashboard'),\n ('/cmdb/changes/timeline', 'Timeline View', 'fugue-dashboard'),\n ('/admin/cmdb', 'Admin', 'fugue-toolbox'),\n )\n\n layers = (\n ('/cmdb/search?layer=1&type=1', 'Applications',\n 'fugue-applications-blue'),\n ('/cmdb/search?layer=2&top_level=1', 'Databases',\n 'fugue-database'),\n ('/cmdb/search?layer=3&top_level=1', 'Documentation/Procedures',\n 'fugue-blue-documents'),\n ('/cmdb/search?layer=4&top_level=1',\n 'Organization Unit/Support Group',\n 'fugue-books-brown'),\n ('/cmdb/search?layer=5&type=2', 'Hardware',\n 'fugue-processor'),\n ('/cmdb/search?layer=6&type=8', 'Network',\n 'fugue-network-ip'),\n ('/cmdb/search?layer=7&type=7', 'Services',\n 'fugue-disc-share'),\n ('/cmdb/search?layer=8&type=5', 'Roles',\n 'fugue-computer-network'),\n ('/cmdb/search', 'All Cis (all layers)', 'fugue-magnifier'),\n )\n reports = (\n ('/cmdb/changes/reports?kind=top_changes',\n 'Top CI changes', 'fugue-reports'),\n ('/cmdb/changes/reports?kind=top_problems',\n 'Top CI problems', 'fugue-reports'),\n ('/cmdb/changes/reports?kind=top_incidents',\n 'Top CI incidents', 'fugue-reports'),\n ('/cmdb/changes/reports?kind=usage',\n 'Cis w/o changes', 'fugue-reports'),\n )\n events = (\n ('/cmdb/changes/changes', 'All Events', 'fugue-arrow'),\n ('/cmdb/changes/changes?type=3', 'Asset attr. 
changes',\n 'fugue-wooden-box--arrow'),\n ('/cmdb/changes/changes?type=4', 'Monitoring events',\n 'fugue-thermometer'),\n ('/cmdb/changes/changes?type=1', 'Repo changes',\n 'fugue-git'),\n ('/cmdb/changes/changes?type=2', 'Agent events',\n 'fugue-flask'),\n ('/cmdb/changes/changes?type=5', 'Status Office events',\n 'fugue-plug'),\n ('/cmdb/changes/incidents', 'Incidents',\n 'fugue-question'),\n ('/cmdb/changes/problems', 'Problems',\n 'fugue-bomb')\n )\n sidebar_items = (\n [MenuHeader('Configuration Items')] +\n [MenuItem(\n label=t[1],\n fugue_icon=t[2],\n href=t[0]\n ) for t in ci] +\n [MenuHeader('CI by Layers')] +\n [MenuItem(\n label=t[1],\n fugue_icon=t[2],\n href=t[0]\n ) for t in layers] +\n [MenuHeader('Reports')] +\n [MenuItem(\n label=t[1],\n fugue_icon=t[2],\n href=t[0]\n ) for t in reports] +\n [MenuHeader('Events and Changes')] +\n [MenuItem(\n label=t[1],\n fugue_icon=t[2],\n href=t[0]\n ) for t in events]\n )\n return sidebar_items\n\n def get_context_data(self, *args, **kwargs):\n ret = super(BaseCMDBView, self).get_context_data(**kwargs)\n ret.update(self.get_permissions_dict())\n ret.update({\n 'sidebar_items': self.get_sidebar_items(),\n 'breadcrumbs': self.generate_breadcrumb(),\n 'url_query': self.request.GET,\n 'span_number': '6',\n 'ZABBIX_URL': settings.ZABBIX_URL,\n 'SO_URL': settings.SO_URL,\n 'tabs_left': False,\n 'fisheye_url': settings.FISHEYE_URL,\n 'fisheye_project': settings.FISHEYE_PROJECT_NAME,\n 'section': 'cmdb',\n })\n return ret\n\n\ndef _get_pages(paginator, page):\n pages = paginator.page_range[\n max(0, page - 4):min(paginator.num_pages, page + 3)\n ]\n if 1 not in pages:\n pages.insert(0, 1)\n pages.insert(1, '...')\n if paginator.num_pages not in pages:\n pages.append('...')\n pages.append(paginator.num_pages)\n return pages\n\n\ndef get_error_title(form):\n return ', '.join(form.errors['__all__']) or 'Correct the errors.' 
if form.errors else ''\n\n\nclass EditRelation(BaseCMDBView):\n template_name = 'cmdb/edit_relation.html'\n Form = CIRelationEditForm\n\n form_options = dict(\n label_suffix='',\n prefix='base',\n )\n\n def get_context_data(self, **kwargs):\n ret = super(EditRelation, self).get_context_data(**kwargs)\n ret.update({\n 'form': self.form,\n })\n return ret\n\n def get(self, *args, **kwargs):\n if not self.get_permissions_dict().get(\n 'edit_configuration_item_relations_perm', False):\n return HttpResponseForbidden()\n rel_id = kwargs.get('relation_id')\n rel = get_object_or_404(db.CIRelation, id=rel_id)\n self.form_options['instance'] = rel\n self.form = self.Form(**self.form_options)\n self.rel_parent = rel.parent\n self.rel_child = rel.child\n self.rel_type = rel.type\n self.rel = rel\n return super(EditRelation, self).get(*args, **kwargs)\n\n @nested_commit_on_success\n def post(self, *args, **kwargs):\n self.form = None\n self.rel = None\n rel_id = kwargs.get('relation_id')\n rel = get_object_or_404(db.CIRelation, id=rel_id)\n self.form_options['instance'] = rel\n\n ci_id = kwargs.get('ci_id')\n if ci_id:\n # remove relation\n ci_relation = db.CIRelation.objects.filter(id=rel_id).all()\n ci_relation.delete()\n return HttpResponse('ok')\n if self.Form:\n self.form = self.Form(self.request.POST, **self.form_options)\n if self.form.is_valid():\n ci_id = self.kwargs.get('ci_id')\n model = self.form.save(commit=False)\n model.save(user=self.request.user)\n return HttpResponseRedirect('/cmdb/edit/%s' % ci_id)\n else:\n error_title = get_error_title(self.form)\n messages.error(self.request, _(error_title))\n return super(EditRelation, self).get(*args, **kwargs)\n\n\nclass AddRelation(BaseCMDBView):\n template_name = 'cmdb/add_relation.html'\n Form = CIRelationEditForm\n\n form_options = dict(\n label_suffix='',\n prefix='base',\n )\n\n def get_context_data(self, **kwargs):\n ret = super(AddRelation, self).get_context_data(**kwargs)\n ret.update({\n 'form': self.form,\n 'relations_parent': self.relations_parent,\n 'relations_child': self.relations_child,\n })\n return ret\n\n def form_initial(self):\n data = {\n 'parent': self.rel_parent,\n 'child': self.rel_child,\n }\n return data\n\n def get(self, *args, **kwargs):\n if not self.get_permissions_dict().get(\n 'edit_configuration_item_relations_perm',\n False):\n return HttpResponseForbidden()\n self.rel_parent = self.request.GET.get('rel_parent')\n self.rel_child = self.request.GET.get('rel_child')\n ci_id = kwargs.get('ci_id')\n self.ci = get_object_or_404(db.CI, id=ci_id)\n self.relations_parent = [\n x.child for x in db.CIRelation.objects.filter(parent=ci_id)\n ]\n self.relations_child = [\n x.parent for x in db.CIRelation.objects.filter(child=ci_id)\n ]\n self.form_options['initial'] = self.form_initial()\n self.form = self.Form(**self.form_options)\n return super(AddRelation, self).get(*args, **kwargs)\n\n @nested_commit_on_success\n def post(self, *args, **kwargs):\n self.form = None\n self.rel = None\n ci_id = kwargs.get('ci_id')\n self.ci = get_object_or_404(db.CI, id=ci_id)\n self.relations_parent = db.CIRelation.objects.filter(\n parent=ci_id,\n )\n self.relations_child = db.CIRelation.objects.filter(\n child=ci_id,\n )\n if self.Form:\n self.form = self.Form(self.request.POST, **self.form_options)\n if self.form.is_valid():\n ci_id = self.kwargs.get('ci_id')\n model = self.form.save(commit=False)\n model.save(user=self.request.user)\n return HttpResponseRedirect('/cmdb/ci/edit/%s' % ci_id)\n else:\n error_title = 
get_error_title(self.form)\n messages.error(self.request, _(error_title))\n return super(AddRelation, self).get(*args, **kwargs)\n\n\nclass Add(BaseCMDBView):\n template_name = 'cmdb/add_ci.html'\n Form = CIEditForm\n form_options = dict(\n label_suffix='',\n prefix='base',\n )\n\n def get_context_data(self, **kwargs):\n ret = super(Add, self).get_context_data(**kwargs)\n ret.update({\n 'form': self.form,\n 'label': 'Add CI',\n 'subsection': 'Add CI',\n 'sidebar_selected': 'add ci',\n })\n return ret\n\n def get(self, *args, **kwargs):\n self.form = self.Form(**self.form_options)\n return super(Add, self).get(*args, **kwargs)\n\n @nested_commit_on_success\n def post(self, *args, **kwargs):\n self.form = None\n self.ci = None\n if self.Form:\n self.form = self.Form(self.request.POST, **self.form_options)\n if self.form.is_valid():\n model = self.form.save()\n if not model.content_object:\n model.uid = \"%s-%s\" % ('mm', model.id)\n model.save(user=self.request.user)\n model.owners.clear()\n model.layers.clear()\n layers = self.form.data.getlist('base-layers')\n for layer in layers:\n # use the whole posted value as the pk (taking layer[0]\n # would keep only the first digit of multi-digit ids)\n model.layers.add(CILayer.objects.get(pk=int(layer)))\n\n owners_t = self.form.data.getlist('base-technical_owners')\n for owner in owners_t:\n own = CIOwnership(ci=model,\n owner=CIOwner.objects.get(pk=owner),\n type=1,)\n own.save()\n owners_b = self.form.data.getlist('base-business_owners')\n for owner in owners_b:\n own = CIOwnership(ci=model,\n owner=CIOwner.objects.get(pk=owner),\n type=2,)\n own.save()\n messages.success(self.request, _(\"Changes saved.\"))\n return HttpResponseRedirect('/cmdb/ci/edit/' + unicode(model.id))\n else:\n messages.error(self.request, _(\"Correct the errors.\"))\n\n return super(Add, self).get(*args, **kwargs)\n\n\nclass LastChanges(BaseCMDBView):\n template_name = 'cmdb/search_changes.html'\n\n def get_context_data(self, **kwargs):\n ret = super(LastChanges, self).get_context_data(**kwargs)\n ret.update({\n 'last_changes': self.last_changes,\n 'jira_url': urljoin(settings.ISSUETRACKERS['default']['URL'], 'browse'),\n })\n return ret\n\n def get_last_changes(self, ci):\n from ralph.cmdb.integration.jira import Jira\n params = dict(jql='DB\\\\ CI=\"%s\"' % self.ci_uid)\n issues = Jira().find_issues(params)\n items_list = []\n for i in issues.get('issues'):\n f = i.get('fields')\n items_list.append(dict(\n key=i.get('key'),\n description=f.get('description'),\n summary=f.get('summary'),\n assignee=f.get('assignee').get('displayName')))\n return items_list\n\n def get(self, *args, **kwargs):\n self.ci_uid = kwargs.get('ci_id', None)\n self.last_changes = self.get_last_changes(self.ci_uid)\n return super(LastChanges, self).get(*args, **kwargs)\n\n\nclass Edit(BaseCMDBView):\n template_name = 'cmdb/edit_ci.html'\n Form = CIEditForm\n form_attributes_options = dict(label_suffix='', prefix='attr')\n form_options = dict(label_suffix='', prefix='base')\n\n def get_first_parent_venture_name(self, ci_id):\n cis = db.CI.objects.filter(\n relations__parent__child=ci_id,\n relations__parent__parent__type=db.CI_TYPES.VENTUREROLE.id).all()\n if cis:\n return cis[0].name\n\n def generate_breadcrumb(self):\n if getattr(self, 'ci', None):\n parent = self.ci.id\n else:\n return []\n list = []\n counter = 0\n while parent and counter < 100:\n ci = db.CI.objects.filter(id=parent).all()[0]\n list.insert(0, ci)\n try:\n parent = db.CI.objects.filter(parent__child=parent).all()[0].id\n except IndexError:\n parent = None\n if parent == ci.id:\n parent = None\n counter += 1\n return list\n\n def get_messages(self):\n 
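# Aggregate dashboard warnings for this CI: puppet failures from the last\n # week plus any recorded incidents and problems. Django's __range lookup\n # expects (start, end) in ascending order, i.e. (a week ago, now).\n 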
days = datetime.timedelta(days=7)\n last_week_puppet_errors = db.CIChangePuppet.objects.filter(\n ci=self.ci,\n time__range=(\n datetime.datetime.now() - days, datetime.datetime.now())\n ).count()\n\n incidents = db.CIIncident.objects.filter(\n ci=self.ci,\n ).count()\n\n problems = db.CIProblem.objects.filter(\n ci=self.ci,\n ).count()\n messages = []\n if last_week_puppet_errors:\n messages.append(dict(\n message=\"Puppet reported %d errors since last week.\" % (\n last_week_puppet_errors),\n title='Warning',\n type='warning',\n ))\n if incidents:\n messages.append(dict(\n message=\"This CI has %d incidents.\" % (incidents),\n title='Be careful.',\n type='error',\n ))\n if problems:\n messages.append(dict(\n message=\"This CI has %d problems.\" % (problems),\n title='Be careful.',\n type='error',\n ))\n\n return messages\n\n def get_context_data(self, **kwargs):\n ret = super(Edit, self).get_context_data(**kwargs)\n ret.update({\n 'form': self.form,\n 'form_attributes': self.form_attributes,\n 'ci': self.ci,\n 'ci_id': self.ci.id,\n 'uid': self.ci.uid,\n 'label': 'Edit CI: {} (uid: {})'.format(self.ci.name, self.ci.uid),\n 'relations_contains': self.relations_contains,\n 'relations_requires': self.relations_requires,\n 'relations_isrequired': self.relations_isrequired,\n 'relations_parts': self.relations_parts,\n 'relations_hasrole': self.relations_hasrole,\n 'relations_isrole': self.relations_isrole,\n 'puppet_reports': self.puppet_reports,\n 'git_changes': self.git_changes,\n 'device_attributes_changes': self.device_attributes_changes,\n 'ci_attributes_changes': self.ci_attributes_changes,\n 'problems': self.problems,\n 'incidents': self.incidents,\n 'zabbix_triggers': self.zabbix_triggers,\n 'service_name': self.service_name,\n 'so_events': self.so_events,\n 'cmdb_messages': self.get_messages(),\n 'show_in_ralph': self.show_in_ralph,\n 'ralph_ci_link': self.ralph_ci_link,\n 'subsection': 'Edit - %s' % self.ci.name,\n })\n return ret\n\n def custom_form_initial(self, ci):\n data = dict()\n objs = db.CIAttributeValue.objects.filter(ci=ci)\n for obj in objs:\n field_type = obj.attribute.attribute_type\n if field_type == db.CI_ATTRIBUTE_TYPES.INTEGER.id:\n field_type = 'integer'\n value = obj.value_integer.value\n elif field_type == db.CI_ATTRIBUTE_TYPES.STRING.id:\n field_type = 'string'\n value = obj.value_string.value\n elif field_type == db.CI_ATTRIBUTE_TYPES.FLOAT.id:\n field_type = 'float'\n value = obj.value_float.value\n elif field_type == db.CI_ATTRIBUTE_TYPES.DATE.id:\n field_type = 'date'\n value = obj.value_date.value\n elif field_type == db.CI_ATTRIBUTE_TYPES.CHOICE.id:\n field_type = 'choice'\n value = obj.value_choice.value\n data['attribute_%s_%s' % (field_type, obj.attribute_id)] = value\n return data\n\n def form_initial(self, ci):\n data = dict(\n technical_owner=', '.join(ci.get_technical_owners()),\n ci=self.ci,\n )\n return data\n\n def check_perm(self):\n if not self.get_permissions_dict().get(\n 'edit_configuration_item_info_generic_perm', False):\n return HttpResponseForbidden()\n
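\n # Each relation direction below becomes a list of (relation, displayed CI,\n # icon) triples; the edit template renders one section per direction of\n # the CONTAINS, REQUIRES and HASROLE relation types.\n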
 def calculate_relations(self, ci_id):\n self.relations_contains = [\n (x, x.child, get_icon_for(x.child))\n for x in db.CIRelation.objects.filter(\n parent=ci_id, type=db.CI_RELATION_TYPES.CONTAINS.id)\n ]\n self.relations_parts = [\n (x, x.parent, get_icon_for(x.parent))\n for x in db.CIRelation.objects.filter(\n child=ci_id,\n type=db.CI_RELATION_TYPES.CONTAINS.id)\n ]\n self.relations_requires = [\n (x, x.child, get_icon_for(x.child))\n for x in db.CIRelation.objects.filter(\n parent=ci_id, type=db.CI_RELATION_TYPES.REQUIRES.id)\n ]\n self.relations_isrequired = [\n (x, x.parent, get_icon_for(x.parent))\n for x in db.CIRelation.objects.filter(\n child=ci_id, type=db.CI_RELATION_TYPES.REQUIRES.id)\n ]\n self.relations_hasrole = [\n (x, x.child, get_icon_for(x.child))\n for x in db.CIRelation.objects.filter(\n parent=ci_id, type=db.CI_RELATION_TYPES.HASROLE.id)\n ]\n self.relations_isrole = [\n (x, x.parent, get_icon_for(x.parent))\n for x in db.CIRelation.objects.filter(\n child=ci_id, type=db.CI_RELATION_TYPES.HASROLE.id)\n ]\n\n def get_ci_id(self):\n \"\"\"Accept either a numeric CI id or a dashed CI uid.\"\"\"\n ci_id = self.kwargs.get('ci_id')\n if ci_id.find('-') >= 0:\n ci = db.CI.objects.get(uid=ci_id)\n return ci.id\n else:\n return self.kwargs.get('ci_id', None)\n\n def get(self, *args, **kwargs):\n if self.check_perm():\n return self.check_perm()\n self.initialize_vars()\n try:\n ci_id = self.get_ci_id()\n except db.CI.DoesNotExist:\n # editing/viewing a CI which doesn't exist\n return HttpResponseRedirect('/cmdb/ci/jira_ci_unknown')\n if ci_id:\n self.ci = get_object_or_404(db.CI, id=ci_id)\n # preview only for devices\n if (self.ci.content_object and\n self.ci.content_type.name == 'device'):\n self.show_in_ralph = True\n self.ralph_ci_link = (\"/ui/search/info/%d\" %\n self.ci.content_object.id)\n self.service_name = self.get_first_parent_venture_name(ci_id)\n self.problems = db.CIProblem.objects.filter(\n ci=self.ci).order_by('-time').all()\n self.incidents = db.CIIncident.objects.filter(\n ci=self.ci).order_by('-time').all()\n self.git_changes = [\n x.content_object for x in db.CIChange.objects.filter(\n ci=self.ci, type=db.CI_CHANGE_TYPES.CONF_GIT.id)]\n self.device_attributes_changes = [\n x.content_object for x in db.CIChange.objects.filter(\n ci=self.ci, type=db.CI_CHANGE_TYPES.DEVICE.id)]\n self.ci_attributes_changes = [\n x.content_object for x in db.CIChange.objects.filter(\n ci=self.ci, type=db.CI_CHANGE_TYPES.CI.id).order_by('time')\n ]\n reps = db.CIChangePuppet.objects.filter(ci=self.ci).all()\n for report in reps:\n puppet_logs = db.PuppetLog.objects.filter(\n cichange=report).all()\n self.puppet_reports.append(\n dict(report=report, logs=puppet_logs)\n )\n self.zabbix_triggers = db.CIChangeZabbixTrigger.objects.filter(\n ci=self.ci).order_by('-lastchange')\n self.so_events = db.CIChange.objects.filter(\n type=db.CI_CHANGE_TYPES.STATUSOFFICE.id,\n ci=self.ci).all()\n self.calculate_relations(ci_id)\n self.form_options['instance'] = self.ci\n self.form_options['initial'] = self.form_initial(self.ci)\n self.form_attributes_options['initial'] = self.custom_form_initial(\n self.ci)\n self.form_attributes = EditAttributeFormFactory(\n ci=self.ci).factory(\n **self.form_attributes_options)\n self.form = self.Form(**self.form_options)\n return super(Edit, self).get(*args, **kwargs)\n\n def initialize_vars(self):\n self.form_attributes = {}\n self.service_name = ''\n self.relations_contains = []\n self.relations_requires = []\n self.relations_parts = []\n self.relations_hasrole = []\n self.relations_isrole = []\n self.relations_isrequired = []\n self.ci_attributes_changes = []\n self.form = None\n self.ci = None\n self.puppet_reports = []\n self.git_changes = []\n 
self.device_attributes_changes = []\n self.zabbix_triggers = []\n self.so_events = []\n self.problems = []\n self.incidents = []\n self.show_in_ralph = False\n self.ralph_ci_link = \"\"\n\n @nested_commit_on_success\n def post(self, *args, **kwargs):\n self.initialize_vars()\n ci_id = self.kwargs.get('ci_id')\n if ci_id:\n self.ci = get_object_or_404(db.CI, id=ci_id)\n self.form_options['instance'] = self.ci\n self.form = self.Form(\n self.request.POST, **self.form_options\n )\n self.form_attributes = EditAttributeFormFactory(\n ci=self.ci).factory(\n self.request.POST,\n **self.form_attributes_options\n )\n if self.form.is_valid() and self.form_attributes.is_valid():\n model = self.form.save(commit=False)\n model.id = self.ci.id\n model.owners.clear()\n model.layers.clear()\n layers = self.form_attributes.data.getlist('base-layers')\n for layer in layers:\n model.layers.add(CILayer.objects.get(pk=int(layer)))\n owners_t = self.form_attributes.data.getlist(\n 'base-technical_owners')\n for owner in owners_t:\n own = CIOwnership(\n ci=model,\n owner=CIOwner.objects.get(pk=owner),\n type=1,)\n own.save()\n owners_b = self.form_attributes.data.getlist(\n 'base-business_owners')\n for owner in owners_b:\n own = CIOwnership(\n ci=model, owner=CIOwner.objects.get(pk=owner),\n type=2,)\n own.save()\n model.save(user=self.request.user)\n self.form_attributes.ci = model\n self.form_attributes.save()\n messages.success(self.request, \"Changes saved.\")\n return HttpResponseRedirect(self.request.path)\n else:\n messages.error(self.request, \"Correct the errors.\")\n return super(Edit, self).get(*args, **kwargs)\n\n\nclass View(Edit):\n template_name = 'cmdb/view_ci.html'\n Form = CIViewForm\n\n def get_context_data(self, **kwargs):\n ret = super(View, self).get_context_data(**kwargs)\n ret.update({\n 'label': 'View CI: {} (uid: {})'.format(self.ci.name, self.ci.uid),\n 'subsection': 'Info - %s' % self.ci.name\n })\n return ret\n\n def check_perm(self):\n if not self.get_permissions_dict().get(\n 'read_configuration_item_info_generic_perm', False):\n return HttpResponseForbidden()\n\n def post(self, *args, **kwargs):\n \"\"\" Overwrite parent class post \"\"\"\n return HttpResponseForbidden()\n\n\nclass ViewIframe(View):\n template_name = 'cmdb/view_ci_iframe.html'\n\n def get_context_data(self, **kwargs):\n ret = super(ViewIframe, self).get_context_data(**kwargs)\n ret.update({'target': '_blank'})\n return ret\n\n\nclass ViewJira(ViewIframe):\n template_name = 'cmdb/view_ci_iframe.html'\n\n def get_ci_id(self):\n ci_uid = self.kwargs.get('ci_uid', None)\n ci = db.CI.objects.get(uid=ci_uid)\n #raise 404 in case of missing CI\n return ci.id\n\n def get_context_data(self, **kwargs):\n ret = super(ViewJira, self).get_context_data(**kwargs)\n ret.update({'span_number': '4'}) # height of screen\n return ret\n\n\nclass Search(BaseCMDBView):\n template_name = 'cmdb/search_ci.html'\n Form = CISearchForm\n cis = []\n\n def get_context_data(self, **kwargs):\n subsection = ''\n layer = self.request.GET.get('layer')\n type = self.request.GET.get('type')\n if layer:\n subsection += '%s - ' % CILayer.objects.get(id=layer)\n elif type:\n type = CI_TYPES.NameFromID(int(type))\n subsection += '%s - ' % CI_TYPES.DescFromName(type)\n subsection += 'Search'\n if layer is None:\n sidebar_selected = 'all-cis'\n else:\n select = CILayer.objects.get(id=layer)\n sidebar_selected = slugify(select.name)\n ret = super(Search, self).get_context_data(**kwargs)\n ret.update({\n 'table_header': self.table_header,\n 'table_body': 
self.table_body,\n 'page': self.page,\n 'pages': _get_pages(self.paginator, self.page_number),\n 'sort': self.request.GET.get('sort', ''),\n 'layer': self.request.GET.get('layer', ''),\n 'type': self.request.GET.get('type', ''),\n 'form': self.form,\n 'sidebar_selected': sidebar_selected,\n 'subsection': subsection,\n })\n return ret\n\n def form_initial(self, values):\n return values\n\n def get_table_header(self, layer, type_):\n DEFAULT_COLS = (\n {'label': 'Type', 'name': 'type', 'sortable': 1},\n {'label': 'Layer', 'name': 'layer', 'sortable': 0},\n {'label': 'Venture', 'name': 'Venture', 'sortable': 0},\n {'label': 'Service', 'name': 'Service', 'sortable': 0},\n {'label': 'PCI Scope', 'name': 'pci', 'sortable': 0},\n )\n table_header = (\n {'label': 'Name', 'name': 'uid', 'sortable': 1},\n {'label': 'CI UID', 'name': 'type', 'sortable': 0},\n )\n if type_ is None:\n table_header += DEFAULT_COLS\n elif type_ == CI_TYPES.APPLICATION.id:\n table_header += (\n {'label': 'Type', 'name': 'type', 'sortable': 1},\n {'label': 'Layer', 'name': 'layer', 'sortable': 0},\n {'label': 'Venture', 'name': 'Venture', 'sortable': 0},\n {'label': 'Service', 'name': 'Service', 'sortable': 0},\n {'label': 'PCI Scope', 'name': 'pci', 'sortable': 0},\n )\n elif type_ == CI_TYPES.DEVICE.id:\n table_header += (\n {'label': 'Parent Device', 'name': 'Parent Device',\n 'sortable': 1},\n {'label': 'Network', 'name': 'Network', 'sortable': 0},\n {'label': 'DC', 'name': 'DC', 'sortable': 0},\n {'label': 'Venture', 'name': 'Venture', 'sortable': 0},\n {'label': 'Service', 'name': 'Service', 'sortable': 0},\n {'label': 'PCI Scope', 'name': 'PCI Scope', 'sortable': 0},\n )\n elif type_ == CI_TYPES.PROCEDURE.id:\n table_header += DEFAULT_COLS\n elif type_ == CI_TYPES.VENTURE.id:\n table_header += (\n {'label': 'Parent venture', 'name': 'Parent venture',\n 'sortable': 1},\n {'label': 'Child Ventures', 'name': 'Child Ventures',\n 'sortable': 1},\n {'label': 'Service', 'name': 'Service', 'sortable': 1},\n {'label': 'Technical Owner', 'name': 'Technical Owner',\n 'sortable': 1},\n {'label': 'Business Owner', 'name': 'Business Owner',\n 'sortable': 1},\n )\n elif type_ == CI_TYPES.VENTUREROLE.id:\n table_header += (\n {'label': 'Parent venture', 'name': 'Parent venture',\n 'sortable': 1},\n {'label': 'Service', 'name': 'Service', 'sortable': 1},\n {'label': 'Technical Owner', 'name': 'Technical Owner',\n 'sortable': 1},\n )\n elif type_ == CI_TYPES.BUSINESSLINE.id:\n table_header += (\n {'label': 'Services contained',\n 'name': 'Services contained', 'sortable': 0},\n )\n elif type_ == CI_TYPES.SERVICE.id:\n table_header += (\n {'label': 'Contained Venture',\n 'name': 'Contained Venture', 'sortable': 1},\n {'label': 'Business Line', 'name': 'Business Line',\n 'sortable': 0},\n {'label': 'Technical Owner', 'name': 'Technical Owner',\n 'sortable': 0},\n {'label': 'Business Owner', 'name': 'Business Owner',\n 'sortable': 0},\n )\n elif type_ == CI_TYPES.NETWORK.id:\n table_header += DEFAULT_COLS\n elif type_ == CI_TYPES.DATACENTER.id:\n table_header += DEFAULT_COLS\n elif type_ == CI_TYPES.NETWORKTERMINATOR.id:\n table_header += DEFAULT_COLS\n table_header += (\n {'label': 'Operations', 'name': 'Operations', 'sortable': 0},\n )\n return table_header\n\n # NOTE: the anchor markup in the helpers below was stripped during an\n # earlier extraction, leaving '%s' templates with too many arguments;\n # it is restored here assuming the /cmdb/ci/view/ and /cmdb/ci/edit/\n # endpoints used elsewhere in this module.\n def get_name(self, i, icon):\n return mark_safe('<a href=\"/cmdb/ci/view/%s\">'\n '<i class=\"fugue-icon %s\"></i> %s</a>' % (\n escape(i.id), escape(icon), escape(i.name))\n )\n\n def get_uid(self, i):\n return mark_safe('<a href=\"/cmdb/ci/view/%s\">%s</a>' % (\n escape(i.id), escape(i.uid)))\n\n def get_layer(self, i):\n return ', '.join(unicode(x) for x in 
i.layers.select_related())\n\n def get_parent_dev(self, i):\n parent = '-'\n try:\n parent = i.content_object.parent\n except AttributeError:\n pass\n return parent\n\n def get_network(self, i):\n network = '-'\n try:\n networks = i.content_object.ipaddress_set.all()\n network = ', '.join(unicode(x) for x in networks)\n except AttributeError:\n pass\n return network\n\n def get_dc(self, i):\n dc = '-'\n try:\n dc = i.content_object.dc\n except AttributeError:\n pass\n return dc\n\n def get_owners(self, i, ownership_type):\n return ', '.join(\"%s %s\" % (b.owner.first_name, b.owner.last_name)\n for b in i.ciownership_set.filter(type=ownership_type))\n\n def get_bl(self, i, relations):\n business_line = '-'\n rel_bl = relations.filter(\n child=i.id, parent__type__id=CI_TYPES.BUSINESSLINE.id\n )\n for bl in rel_bl:\n business_line = ('<a href=\"/cmdb/ci/view/%s\">%s</a>' % (\n escape(bl.parent.id), escape(bl.parent.name))\n )\n return mark_safe(business_line)\n\n def get_venture(self, relations, i, child=False):\n venture = []\n if child is False:\n ven = relations.filter(\n child=i.id,\n parent__type__id=CI_TYPES.VENTURE.id\n )\n for v in ven:\n venture.append(\n '<a href=\"/cmdb/ci/view/%s\">%s</a>' % (\n escape(v.parent.id), escape(v.parent.name))\n )\n elif child is True:\n ven = relations.filter(\n parent=i.id,\n child__type__id=CI_TYPES.VENTURE.id\n )\n for v in ven:\n venture.append(\n '<a href=\"/cmdb/ci/view/%s\">%s</a>' % (\n escape(v.child.id), escape(v.child.name))\n )\n return mark_safe(', '.join(x for x in venture))\n\n def get_service(self, relations, i):\n services = ''\n servi = relations.filter(\n parent=i.id, child__type__id=CI_TYPES.SERVICE.id\n )\n for s in servi:\n services += '%s, ' % escape(s.child.name)\n return mark_safe(services)\n\n def get_operations(self, i):\n return mark_safe('<a href=\"/cmdb/ci/edit/%s\">Edit</a> | '\n '<a href=\"/cmdb/ci/view/%s\">View</a>' % (\n escape(i.id), escape(i.id)\n ))\n\n def get(self, *args, **kwargs):\n values = self.request.GET\n cis = db.CI.objects.all()\n uid = values.get('uid')\n state = values.get('state')\n status = values.get('status')\n type_ = int(values.get('type')) if values.get('type') else None\n layer = values.get('layer')\n parent_id = int(values.get('parent', 0) or 0)\n if values:\n if uid:\n cis = cis.filter(Q(name__icontains=uid) | Q(uid=uid))\n if state:\n cis = cis.filter(state=state)\n if status:\n cis = cis.filter(status=status)\n if type_:\n cis = cis.filter(type=type_)\n if layer:\n cis = cis.filter(layers=layer)\n if parent_id:\n cis = cis.filter(child__parent__id=parent_id)\n sort = self.request.GET.get('sort', 'name')\n if sort:\n cis = cis.order_by(sort)\n if values.get('top_level'):\n cis = cis.filter(child__parent=None)\n page = self.request.GET.get('page') or 1\n self.page_number = int(page)\n self.paginator = Paginator(cis, ROWS_PER_PAGE)\n try:\n cis = self.paginator.page(page)\n except PageNotAnInteger:\n cis = self.paginator.page(1)\n page = 1\n except EmptyPage:\n cis = self.paginator.page(self.paginator.num_pages)\n page = self.paginator.num_pages\n self.page = cis\n table_body = []\n relations = CIRelation.objects.all()\n t_owners = 1\n b_owners = 2\n for i in cis:\n icon = get_icon_for(i)\n venture = self.get_venture(relations, i)\n service = self.get_service(relations, i)\n DEFAULT_ROWS = [\n {'name': 'name', 'value': self.get_name(i, icon)},\n {'name': 'uid', 'value': self.get_uid(i)},\n {'name': 'type', 'value': i.type.name},\n {'name': 'layer', 'value': self.get_layer(i)},\n {'name': 'venture', 'value': venture},\n {'name': 'service', 'value': service},\n {'name': 'pci_scope', 'value': i.pci_scope},\n {'name': 'operations', 'value': self.get_operations(i)}\n ]\n 
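# Pick a row layout per CI type; types without a dedicated layout fall\n # back to DEFAULT_ROWS.\n 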
if type_ is None:\n table_body.append(DEFAULT_ROWS)\n elif type_ == CI_TYPES.APPLICATION.id:\n table_body.append(DEFAULT_ROWS)\n elif type_ == CI_TYPES.DEVICE.id:\n row = [\n {'name': 'name', 'value': self.get_name(i, icon)},\n {'name': 'uid', 'value': self.get_uid(i)},\n {'name': 'parent-dev', 'value': self.get_parent_dev(i)},\n {'name': 'network', 'value': self.get_network(i)},\n {'name': 'dc', 'value': self.get_dc(i)},\n {'name': 'venture', 'value': venture},\n {'name': 'service', 'value': service},\n {'name': 'pci_scope', 'value': i.pci_scope},\n {'name': 'operations', 'value': self.get_operations(i)}\n ]\n table_body.append(row)\n elif type_ == CI_TYPES.VENTURE.id:\n venture_c = self.get_venture(relations, i, child=True)\n b_own = self.get_owners(i, b_owners)\n t_own = self.get_owners(i, t_owners)\n row = [\n {'name': 'name', 'value': self.get_name(i, icon)},\n {'name': 'uid', 'value': self.get_uid(i)},\n {'name': 'venture', 'value': venture},\n {'name': 'venture-child', 'value': venture_c},\n {'name': 'service', 'value': service},\n {'name': 't_owners', 'value': t_own},\n {'name': 'b_owners', 'value': b_own},\n {'name': 'operations', 'value': self.get_operations(i)}\n ]\n table_body.append(row)\n elif type_ == CI_TYPES.VENTUREROLE.id:\n t_own = self.get_owners(i, t_owners)\n row = [\n {'name': 'name', 'value': self.get_name(i, icon)},\n {'name': 'uid', 'value': self.get_uid(i)},\n {'name': 'venture', 'value': venture},\n {'name': 'service', 'value': service},\n {'name': 't_owners', 'value': t_own},\n {'name': 'operations', 'value': self.get_operations(i)}\n ]\n table_body.append(row)\n elif type_ == CI_TYPES.BUSINESSLINE.id:\n ven = relations.filter(parent=i.id)\n # anchor markup restored (see note above get_name)\n services_contained = ', '.join(\n '<a href=\"/cmdb/ci/view/%s\">%s</a>' %\n (v.child.id, v.child.name) for v in ven)\n row = [\n {'name': 'name', 'value': self.get_name(i, icon)},\n {'name': 'uid', 'value': self.get_uid(i)},\n {'name': 'venture', 'value': services_contained},\n {'name': 'operations', 'value': self.get_operations(i)}\n ]\n table_body.append(row)\n elif type_ == CI_TYPES.SERVICE.id:\n b_own = self.get_owners(i, b_owners)\n t_own = self.get_owners(i, t_owners)\n row = [\n {'name': 'name', 'value': self.get_name(i, icon)},\n {'name': 'uid', 'value': self.get_uid(i)},\n {'name': 'venture-child', 'value': venture},\n {'name': 'bl', 'value': self.get_bl(i, relations)},\n {'name': 't_owners', 'value': t_own},\n {'name': 'b_owners', 'value': b_own},\n {'name': 'operations', 'value': self.get_operations(i)}\n ]\n table_body.append(row)\n else:\n table_body.append(DEFAULT_ROWS)\n self.table_header = self.get_table_header(layer, type_)\n self.table_body = table_body\n form_options = dict(\n label_suffix='',\n initial=self.form_initial(values),\n )\n self.form = self.Form(**form_options)\n return super(Search, self).get(*args, **kwargs)\n\n\nclass Index(BaseCMDBView):\n template_name = 'cmdb/index.html'\n\n def get_context_data(self, **kwargs):\n ret = super(Index, self).get_context_data(**kwargs)\n return ret\n\n\nclass ViewUnknown(BaseCMDBView):\n template_name = 'cmdb/view_ci_error.html'\n\n def get_context_data(self, **kwargs):\n ret = super(ViewUnknown, self).get_context_data(**kwargs)\n ret.update({\n 'error_message':\n 'This Configuration Item cannot be found in the CMDB.'})\n return ret\n\n\nclass CMDB(View):\n template_name = 'cmdb/view_ci_ralph.html'\n read_perm = Perm.read_configuration_item_info_generic\n
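\n # Maps a Ralph device primary key onto the CI that wraps it through the\n # generic relation (content type \"device\" + object_id), then renders the\n # usual CI view for it.\n 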
def get_ci_id(self, *args, **kwargs):\n device_id = self.kwargs.get('device')\n try:\n return CI.objects.get(\n type=CI_TYPES.DEVICE.id,\n object_id=device_id\n ).id\n except CI.DoesNotExist:\n return None\n\n def get_context_data(self, **kwargs):\n ret = super(View, self).get_context_data(**kwargs)\n ret.update({\n 'ci': self.ci,\n 'label': 'View CI: {} (uid: {})'.format(self.ci.name, self.ci.uid),\n 'url_query': self.request.GET,\n 'components': _get_details(\n self.ci.content_object, purchase_only=False\n )\n })\n return ret\n\n\nclass GraphsTree(BaseCMDBView):\n template_name = 'cmdb/graphs_tree.html'\n\n @staticmethod\n def get_ajax(request):\n root = CI.objects.get(pk=request.GET.get('ci_id'))\n response_dict = search_tree({}, root)\n return HttpResponse(\n simplejson.dumps(response_dict),\n mimetype='application/json',\n )\n\n def get_initial(self):\n return dict(\n ci=self.request.GET.get('ci'),\n )\n\n def get_context_data(self, *args, **kwargs):\n ret = super(GraphsTree, self).get_context_data(**kwargs)\n form = SearchImpactForm(initial=self.get_initial())\n ret.update(dict(\n form=form,\n ))\n return ret\n\n\nclass Graphs(BaseCMDBView):\n template_name = 'cmdb/graphs.html'\n rows = []\n graph_data = {}\n\n def get_context_data(self, *args, **kwargs):\n ret = super(Graphs, self).get_context_data(**kwargs)\n form = SearchImpactForm(initial=self.get_initial())\n ret.update(dict(\n form=form,\n rows=self.rows,\n graph_data=simplejson.dumps(self.graph_data),\n ))\n return ret\n\n def get_initial(self):\n return dict(\n ci=self.request.GET.get('ci'),\n )\n\n def get(self, *args, **kwargs):\n ci_id = self.request.GET.get('ci')\n self.rows = []\n if ci_id:\n ci_names = dict([(x.id, x.name) for x in CI.objects.all()])\n i = ImpactCalculator()\n st, pre = i.find_affected_nodes(int(ci_id))\n nodes = [(\n key, ci_names[key],\n get_icon_for(CI.objects.get(pk=key))) for key in st.keys()]\n relations = [dict(\n child=x,\n parent=st.get(x),\n parent_name=ci_names[x],\n type=i.graph.edge_attributes((st.get(x), x))[0],\n child_name=ci_names[st.get(x)])\n for x in st.keys() if x and st.get(x)]\n self.graph_data = dict(\n nodes=nodes, relations=relations)\n self.rows = [dict(\n icon=get_icon_for(CI.objects.get(pk=x)),\n ci=CI.objects.get(pk=x)) for x in pre]\n return super(BaseCMDBView, self).get(*args, **kwargs)\n\n\n","sub_path":"src/ralph/cmdb/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":47280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"498108220","text":"#!/usr/bin/env python3\n\nimport re\nimport sys\nimport signal\nimport argparse\nimport subprocess\nfrom os import walk\n\n\ndef signal_handler(signum, frame):\n # the parameter must not shadow the signal module, and signal numbers\n # are plain ints, so compare with == rather than is\n if signum == signal.SIGINT:\n print('That\\'s okay, see you later!')\n sys.exit(1)\n\nsignal.signal(signal.SIGINT, signal_handler)\n\n\ndef get_info(language):\n bash_command = \"exercism fetch {}\".format(language)\n output = subprocess.check_output(['bash', '-c', bash_command])\n\n project_name = ''\n path = ''\n is_new_problem = True\n for line in output.decode(\"utf-8\").split('\\n'):\n if line.startswith(language):\n try:\n project_name = re.search('\\\\(([^\\\\)]+)\\\\)', line).group(1)\n path = re.search('\\\\/\\\\s*(.*)$', line).group(0)\n except AttributeError:\n print('Problem with parsing project name and path to project')\n sys.exit(2)\n elif 'new: 0' in line:\n is_new_problem = False\n\n if not project_name:\n print('Couldn\\'t find any project')\n sys.exit(2)\n\n file_name = project_name.replace(' ', '')\n return file_name, path, is_new_problem\n\n\ndef fetch_problem(language):\n bash_command = \"exercism fetch {}\".format(language)\n 
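# (sketch) the same fetch could skip the intermediate shell, assuming the\n # exercism CLI is on PATH:\n # subprocess.check_call([\"exercism\", \"fetch\", language])\n 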
subprocess.call(['bash', '-c', bash_command])\n\n _, path, _ = get_info(language)\n\n generate_project(language, path)\n\n\ndef submit_solution(language, submit_all, suffix):\n file_name, path, is_new_problem = get_info(language)\n\n if is_new_problem:\n print('Detected that \\'submit\\' was called before \\'fetch\\'. Cowardly shutting down.')\n generate_project(language, path)\n sys.exit(1)\n\n file_suffixes = {\"swift\": \"swift\"}\n if suffix:\n file_suffix = suffix\n elif language in file_suffixes:\n file_suffix = file_suffixes[language]\n else:\n file_suffix = input('What suffix is your language using? ')\n\n if submit_all:\n bash_command = \"exercism submit \"\n # next() on the generator (os.walk has no .next() method in Python 3)\n (_, _, filenames) = next(walk(path))\n\n for file in filenames:\n bash_command += \"{0}/Sources/{1}.{2} \".format(path, file, file_suffix)\n else:\n bash_command = \"exercism submit {0}/Sources/{1}.{2}\".format(path, file_name, file_suffix)\n subprocess.call(['bash', '-c', bash_command])\n\n\ndef generate_project(language, path):\n generate_commands = {\"swift\": \"swift package -C {} generate-xcodeproj\".format(path)}\n\n if language in generate_commands:\n bash_command = generate_commands[language]\n subprocess.call(['bash', '-c', bash_command])\n\n\nparser = argparse.ArgumentParser(epilog='use -l or --language to give language name to script directly')\nsubparsers = parser.add_subparsers(dest='command')\nsubparser_fetch = subparsers.add_parser('fetch', help='fetch a new problem or get info about the current one')\nsubparser_fetch.add_argument('language', type=str, help='language you wish to use')\nsubparser_submit = subparsers.add_parser('submit', help='submit a solution to current problem')\nsubparser_submit.add_argument('language', type=str, help='language you wish to use')\nsubparser_submit.add_argument('-a', '--allFiles', help='use default name and don\\'t ask for a new one', action='store_true')\nsubparser_submit.add_argument('-s', '--suffix', help='give file suffix to script directly')\nargs = parser.parse_args()\n\nlanguage = ''\nif not hasattr(args, 'language'):\n language = input('Which language do you wish to exercise today? ')\nelse:\n language = args.language\n\nif 'fetch' in args.command:\n fetch_problem(language)\nelif 'submit' in args.command:\n submit_solution(language, args.allFiles, args.suffix)\nelse:\n print('Sorry, I don\\'t understand. 
Try again?\\n')\n parser.print_help()\n sys.exit(2)","sub_path":"EasyExercism.py","file_name":"EasyExercism.py","file_ext":"py","file_size_in_byte":3717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"122910095","text":"\"\"\"\nauthor : Park Min Hyeok\ngithub : https://github.com/m1nnh\ne-mail : alsgur9784@naver.com\n\ntitle : N과 M(5)\ndescription : backtracking\n\"\"\"\n\ndef recursive_comb(N, M, array, answer):\n if len(answer) == M:\n print(*answer)\n return\n\n for i in range(len(array)):\n if len(answer) == 0:\n answer.append(array[i])\n recursive_comb(N, M, array, answer)\n answer.pop()\n\n elif array[i] not in answer:\n answer.append(array[i])\n recursive_comb(N, M, array, answer)\n answer.pop()\n\nif __name__ == \"__main__\":\n N, M = map(int, input().split())\n array = list(map(int, input().split()))\n\n array.sort()\n\n recursive_comb(N, M, array, [])","sub_path":"BOJ/BOJ-15654.py","file_name":"BOJ-15654.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"405768183","text":"#!/usr/bin/python -O\n# -*- coding: iso-8859-1 -*-\n# -*- coding: latin-1 -*-\n# by Loreto Notarantonio 2013, February\n# ######################################################################################\n\nimport os, sys\nimport argparse\n\nthis_mod = sys.modules[__name__]\n\n# ######################################################################################\n# # ParseInput()\n# ######################################################################################\ndef ParseInput(gv):\n # global logger, isADMIN, force, TAByel, TABerr, TABcyan, cYEL, cCYAN, cRESET, mainArgs\n global isADMIN, TAByel, TABerr, TABcyan, cYEL, cCYAN, cRESET, mainArgs\n\n\n serviceActionsDict = dict(\n ssh = \"kitty ssh to a server\",\n jbossgui = \"open a CLI GUI towards JBoss\",\n gui = \"open a CLI GUI towards JBoss\",\n )\n\n\n TAByel = gv.Ln.cYELLOW + ' '*8\n TABerr = gv.Ln.cERROR + ' '*8\n TABcyan = gv.Ln.cCYAN + ' '*8\n cYEL = gv.Ln.cYELLOW\n cCYAN = gv.Ln.cCYAN\n cRESET = gv.Ln.cRESET\n\n # Force -h if necessary\n if len(sys.argv) < 2: sys.argv.append('-h')\n\n totalCMDLIST = '\\n'\n for key, val in sorted(serviceActionsDict.items()):\n totalCMDLIST += ' * {0:<12} : {1}\\n'.format(key, val)\n\n mainHelp=\"\"\"\n Enter one of the following commands:\n (with the -h parameter if you want the command-specific help)\n {CMDLIST}\\n\"\"\".format(CMDLIST=totalCMDLIST)\n\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawTextHelpFormatter, # indicates that description and epilog are already correctly formatted and should not be line-wrapped:\n description=cYEL+'tool for running convenience commands.',\n usage='', # we do not want the usage line\n epilog=cYEL+mainHelp+cRESET,\n conflict_handler='resolve',\n )\n\n\n parser.add_argument('ACTION', help='Command/Action to run')\n\n # parse_args defaults to [1:] for args, but you need to\n # exclude the rest of the args too, or validation will fail\n mainArgs = parser.parse_args(sys.argv[1:2])\n\n\n if not (mainArgs.ACTION in serviceActionsDict.keys()):\n print ('Unrecognized action', mainArgs.ACTION)\n parser.print_help()\n exit(1)\n\n elif not hasattr(this_mod, mainArgs.ACTION):\n print (TABcyan+ '[{0}] - Command not yet implemented!'.format(mainArgs.ACTION))\n exit(1)\n\n\n # invoke method with name 'mainArgs.ACTION'\n # returns a namespace\n InputPARAM = getattr(this_mod, mainArgs.ACTION)()\n
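\n # getattr-based dispatch: every key in serviceActionsDict must name a\n # module-level function defined below (ssh/gui/jbossgui); the hasattr()\n # check above guards against actions that are listed but not implemented.\n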
\n # manually add values onto the returned namespace\n InputPARAM.ACTION = mainArgs.ACTION\n\n return vars(InputPARAM) # dict format\n\n\n\n\n###################################################\n# - SSH\n###################################################\ndef ssh():\n return commonParsing(mainArgs.ACTION)\n\n###################################################\n# - GUI\n###################################################\ndef gui():\n return commonParsing(mainArgs.ACTION)\n\n###################################################\n# - JBOSSGUI\n###################################################\ndef jbossgui():\n return commonParsing(mainArgs.ACTION)\n\n\n\n###################################################\n# - commonParsing\n###################################################\ndef commonParsing(actionName, DESCR=''):\n\n usageMsg = \"\\n {0} {1} JBName {2}[options]\".format( cYEL, actionName, cRESET)\n myParser = argparse.ArgumentParser( description=actionName + ' Command',\n add_help=True, usage=usageMsg,\n # formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n formatter_class=argparse.RawTextHelpFormatter,\n # formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n\n # ----------- SSH\n if actionName in ['ssh']:\n myParser.add_argument( \"-s\", \"--server\",\n # action=\"store_true\",\n dest=\"serverName\",\n required=True,\n help=cCYAN +\n \"\"\"server name to connect!\n \"\"\"+ cRESET)\n\n myParser.add_argument( \"-lt\", \"--local-tunnel\",\n dest=\"localTunnel\",\n default=None,\n help=cCYAN+\n \"\"\"if you want a LOCAL tunnel\ne.g.:\n [localAddr:]localPort:destHost:destPort\n \"\"\"+cRESET)\n\n myParser.add_argument( \"-rt\", \"--remote-tunnel\",\n dest=\"remoteTunnel\",\n default=None,\n help=cCYAN+\n \"\"\"if you want a REMOTE tunnel\ne.g.:\n [remoteAddr:]remotePort:destHost:destPort\n \"\"\"+cRESET)\n\n myParser.add_argument( \"-L\", \"--local\",\n dest=\"localTunnelPort\",\n default=None,\n help=cCYAN+\n \"\"\"if you want a tunnel\n \"\"\"+cRESET)\n\n myParser.add_argument( \"-R\", \"--remote\",\n dest=\"remoteTunnelPort\",\n default=None,\n help=cCYAN+\n \"\"\"if you want a tunnel\n \"\"\"+cRESET)\n\n myParser.add_argument( \"-LR\", \"--loc_rem\",\n dest=\"localRemoteTunnelPort\",\n default=None,\n help=cCYAN+\n \"\"\"if you want a tunnel\n \"\"\"+cRESET)\n\n\n # ----------- GUI / JBOSSGUI\n elif actionName in ['gui', 'jbossgui']:\n myParser.add_argument( \"-s\", \"--server\",\n # action=\"store_true\",\n dest=\"serverName\",\n required=True,\n help=cCYAN +\n \"\"\"server name to connect!\n \"\"\"+ cRESET)\n args = myParser.parse_args(sys.argv[2:])\n\n return args\n\n\n","sub_path":"PrjPackage/ParseInput.py","file_name":"ParseInput.py","file_ext":"py","file_size_in_byte":6480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"351032294","text":"def init():\n\n global stack, level\n\n stack[ level ] = 0\n\ndef succ():\n\n global stack, level, k\n\n if stack[ level ] < n:\n\n stack[ level ] += 1\n\n return True\n\n return False\n\ndef sol():\n\n global level, k\n\n return level == k\n\ndef valid():\n\n global stack, level\n\n for i in range(1, level):\n\n if stack[ i ] == stack[ level ]:\n\n return False\n\n return True\n\ndef printf():\n\n global stack, k\n\n for i in range(1, k + 1):\n\n print(stack[ i ], end = ' ')\n print()\n\ndef bk():\n\n global level\n\n level = 1\n init()\n\n while level > 0:\n\n hs = True\n iv = False\n\n while hs is True and iv is False:\n\n hs = succ()\n\n if 
hs is True:\n iv = valid()\n\n if hs is True:\n\n if sol():\n printf()\n else:\n level += 1\n init()\n else:\n level -= 1\n\n\ndef main():\n global n, k, stack\n #A = {1,2,3,4}\n n = 4\n k = 2\n stack = [ 0 ] * (k + 1)\n bk()\nmain()\n","sub_path":"foundations/backtracking/arrangements.py","file_name":"arrangements.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"536099772","text":"import socket\nimport threading\n\nfrom PyQt5.QtCore import QObject, pyqtSignal\n\nfrom utils import calc_checksum\n\n\nclass TcpServer(QObject):\n data_received_signal = pyqtSignal(bytes)\n\n def __init__(self, port):\n super(TcpServer, self).__init__()\n self.buffer_size = 2048\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.port = port\n self.server_thread = None\n\n def listen(self):\n try:\n self.socket.bind((\"\", self.port))\n print(\"Chat server is running on port {}\".format(self.port))\n self.socket.listen()\n thread = threading.Thread(target=self._receive_connection)\n thread.start()\n except socket.error:\n print(\"Chat server is not running\")\n\n def _receive_connection(self):\n while True:\n try:\n connection, address = self.socket.accept()\n self._receive_data(connection)\n except socket.error:\n break\n self.socket.close()\n\n def _receive_data(self, connection):\n while True:\n data = connection.recv(self.buffer_size)\n if not data:\n break\n self.data_received_signal.emit(data)\n connection.close()\n\n\nclass TcpClient:\n def __init__(self, host, port):\n self.buffer_size = 2048\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.port = port\n self.host = host\n\n def send(self, data):\n self._connect()\n self.socket.send(data)\n self.socket.close()\n\n def _connect(self):\n self.socket.connect((self.host, self.port))\n\n\nclass UdpServer(QObject):\n data_received_signal = pyqtSignal(bytes)\n\n def __init__(self, port):\n super(UdpServer, self).__init__()\n self.buffer_size = 2048\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.port = port\n\n def listen(self):\n try:\n self.socket.bind((\"\", self.port))\n print(\"Chat server is running on port {}\".format(self.port))\n thread = threading.Thread(target=self._receive_data)\n thread.start()\n except socket.error:\n print(\"Chat server is not running\")\n\n def _receive_data(self):\n while True:\n try:\n data, address = self.socket.recvfrom(self.buffer_size)\n self.data_received_signal.emit(data)\n except socket.error:\n break\n self.socket.close()\n\n\nclass UdpClient:\n def __init__(self, host, port):\n self.buffer_size = 2048\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.port = port\n self.host = host\n\n def send(self, data):\n self.socket.sendto(data, (self.host, self.port))\n\n\nclass ReliableUdpServer(QObject):\n data_received_signal = pyqtSignal(bytes)\n\n def __init__(self, host, port):\n super(ReliableUdpServer, self).__init__()\n self.buffer_size = 2048\n self.expected_seq_no = 0\n self.source_port = port\n self.sending_host = host\n self.sending_port = port - 2\n self.sender_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.receiver_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n def listen(self):\n try:\n self.receiver_socket.bind((\"\", self.source_port))\n print(\"Chat server is running on port {}\".format(self.source_port))\n thread = threading.Thread(target=self._receive_data)\n thread.start()\n except 
socket.error:\n print(\"Chat server is not running\")\n\n def _receive_data(self):\n while True:\n data, address = self.receiver_socket.recvfrom(self.buffer_size)\n # frame layout: 32-byte checksum | 1-byte sequence number | payload\n received_checksum = data[:32]\n seq_no = data[32]\n data = data[33:]\n validation_checksum = calc_checksum(data)\n if validation_checksum == received_checksum:\n ack_data = \"ACK\".encode(\"utf-8\") + seq_no.to_bytes(1, \"big\")\n self.sender_socket.sendto(calc_checksum(ack_data) + ack_data, (self.sending_host, self.sending_port))\n if seq_no == self.expected_seq_no:\n self.data_received_signal.emit(data)\n self.expected_seq_no = 1 - self.expected_seq_no\n else:\n # corrupted frame: re-send the ACK for the last in-order\n # sequence number (a duplicate ACK, not a separate NACK type)\n nack_seq_no = 1 - self.expected_seq_no\n nack_data = \"ACK\".encode(\"utf-8\") + nack_seq_no.to_bytes(1, \"big\")\n self.sender_socket.sendto(calc_checksum(nack_data) + nack_data, (self.sending_host, self.sending_port))\n\n\nclass ReliableUdpClient:\n def __init__(self, host, port):\n self.buffer_size = 2048\n self.seq_no = 0\n self.destination_port = port\n self.destination_host = host\n self.listening_port = port - 2\n self.sender_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.receiver_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.receiver_socket.bind((\"\", self.listening_port))\n\n def send(self, data):\n self.receiver_socket.settimeout(1)\n self._send_until_acked(data)\n\n def _send_until_acked(self, data):\n # stop-and-wait with a 1-bit alternating sequence number\n acked = False\n while not acked:\n self.sender_socket.sendto(calc_checksum(data) + self.seq_no.to_bytes(1, \"big\") + data, (self.destination_host, self.destination_port))\n try:\n data, address = self.receiver_socket.recvfrom(self.buffer_size)\n except socket.timeout:\n pass\n else:\n # ACK layout: 32-byte checksum | \"ACK\" | 1-byte sequence number\n checksum = data[:32]\n ack_seq_no = data[35]\n if calc_checksum(data[32:]) == checksum and ack_seq_no == self.seq_no:\n acked = True\n self.seq_no = 1 - self.seq_no\n","sub_path":"networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":5894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"310416860","text":"#Importing required libraries\r\nimport heapq\r\nimport numpy as np\r\nimport math\r\nimport time\r\nimport matplotlib.pyplot as plt\r\n\r\n#Initializing the variables\r\nstart_time = time.time()\r\nplotx = []\r\nploty = []\r\nox =[]\r\noy =[]\r\nobstacle_space =[]\r\nobs =[]\r\nopen_list = []\r\nclosed_list = []\r\n\r\n#Takes the Start and Goal Node from the User\r\ndef Take_Input():\r\n    start_x,start_y = list(map(int,(input(\"Please enter start x and y coordinate:- \").split())))\r\n    goal_x,goal_y = list(map(int,(input(\"Please enter goal x and y coordinate:- \").split())))\r\n    global start\r\n    global goal\r\n    start = (round(start_x),round(start_y))\r\n    goal = (round(goal_x),round(goal_y))\r\n\r\n#Creating the workspace\r\ndef generate_obstacles(robot_radius, clearance):\r\n    margin = robot_radius + clearance\r\n    workspace = np.zeros(shape=(303, 203))\r\n    ox = []\r\n    oy = []\r\n    obstacle_space = []\r\n    \r\n    #create a border around the map (not obstacle)\r\n    for i in range(302):\r\n        ox.append(i)\r\n        oy.append(-1)\r\n        obstacle_space.append([i,-1])\r\n\r\n        ox.append(i)\r\n        oy.append(201)\r\n        obstacle_space.append([i,201])\r\n\r\n    for i in range(202):\r\n        ox.append(-1)\r\n        oy.append(i)\r\n        obstacle_space.append([-1,i])\r\n\r\n        ox.append(301)\r\n        oy.append(i)\r\n        obstacle_space.append([301,i])\r\n\r\n    # add obstacles to the map\r\n    for x in range(301):\r\n        for y in range(201):\r\n            # feasible workspace considering robot 
radius and clearance\r\n if (y - margin <= 0) or (x - margin <= 0) or (y - (200-margin) >= 0) or (x - (300-margin) >= 0):\r\n obstacle_space.append([x,y])\r\n # circle\r\n if (x - 225)**2 + (y - 150)**2 <= (25 + margin)**2:\r\n obstacle_space.append([x,y])\r\n \r\n # ellipse\r\n if ((x - 150)/(40 + margin))**2 + ((y - 100)/(20 + margin))**2 <= 1:\r\n obstacle_space.append([x,y])\r\n # rhombus\r\n if (5*y - 3*x + 5*(95 - margin) <= 0) and (5*y + 3*x - 5*(175 + margin) <= 0) and \\\r\n (5*y - 3*x + 5*(125 + margin) >= 0) and (5*y + 3*x - 5*(145 - margin) >= 0):\r\n obstacle_space.append([x,y])\r\n \r\n # rectangle\r\n if (5*y - 9*x - 5*(13 + margin) <= 0) and (65*y + 37*x - 5*1247 - 65*margin <= 0) and \\\r\n (5*y - 9*x + 5*(141 + margin) >= 0) and (65*y + 37*x - 5*1093 + 65*margin >= 0):\r\n obstacle_space.append([x,y])\r\n \r\n # polygon\r\n if (y <= 13*x - 140 + margin) and (y - x - 100 + margin >= 0) and \\\r\n (5*y + 7*x - 5*220 <= 0):\r\n obstacle_space.append([x,y])\r\n if (y - 185 - margin <= 0) and (5*y + 7*x - 5*(290 + margin) <= 0) and \\\r\n (5*y - 6*x - 5*(30 - margin) >= 0) and (5*y + 6*x - 5*(210 - margin) >= 0) and \\\r\n (5*y + 7*x - 5*(220 - margin) >= 0):\r\n obstacle_space.append([x,y])\r\n\r\n for i in obstacle_space:\r\n x = i[0]\r\n y = i[1]\r\n workspace[x][y] = 1\r\n\r\n #return obstacle_space\r\n x_obs=[col[0] for col in obstacle_space]\r\n y_obs=[col[1] for col in obstacle_space]\r\n plt.scatter(ox,oy,color = 'r')\r\n plt.scatter(x_obs,y_obs,color = 'r')\r\n Final_obs = []\r\n for i in range(302):\r\n Final_obs.append(workspace[i])\r\n return x_obs,y_obs,obstacle_space,Final_obs\r\n\r\n#Set the action steps along with the cost2come\r\ndef Make_Movements():\r\n steps = [[1,0,1],\r\n [0,1,1],\r\n [-1,0,1],\r\n [0,-1,1],\r\n [1,1,math.sqrt(2)],\r\n [1,-1,math.sqrt(2)],\r\n [-1,-1,math.sqrt(2)],\r\n [-1,1,math.sqrt(2)]]\r\n return steps\r\n\r\n#Backtracking to get the path from the goal node to the final node\r\ndef Back_Track(my_List):\r\n backtrack = []\r\n l = len(my_List)\r\n current_pos = my_List[l-1][1]\r\n backtrack.append(current_pos)\r\n parent = my_List[l-1][2]\r\n while parent != None:\r\n for i in range(l):\r\n X = my_List[i]\r\n if X[1] == parent:\r\n parent = X[2]\r\n current_pos = X[1]\r\n backtrack.append(current_pos)\r\n return backtrack[::-1]\r\n\r\n#Dijkstra Search\r\ndef dijkstra_algorithm(start,goal, workspace):\r\n start_node = (0,start,None)\r\n goal_node = (0,goal,None)\r\n movements = Make_Movements()\r\n heapq.heappush(open_list,(start_node))\r\n workspace[start_node[1][0]][start_node[1][1]] = 1\r\n\r\n while len(open_list)>0:\r\n #Storing the visted nodes\r\n current_node = heapq.heappop(open_list)\r\n heapq.heappush(closed_list,current_node)\r\n plotx.append(current_node[1][0])\r\n ploty.append(current_node[1][1])\r\n\r\n #Visualizing the explored nodes\r\n if len(ploty)%1000 == 0:\r\n plt.plot(start[0], start[1], \"yo\")\r\n plt.plot(goal[0], goal[1], \"yo\")\r\n plt.plot(plotx,ploty, '.k')\r\n plt.pause(0.001)\r\n\r\n if current_node[1] == goal_node[1] :\r\n print('goal coordinates found')\r\n final_path = Back_Track(closed_list)\r\n return final_path\r\n \r\n #Exploring the nodes\r\n for new_position in movements:\r\n node_pos = (current_node[1][0] + new_position[0],\r\n current_node[1][1] + new_position[1])\r\n node_position_cost = current_node[0] + new_position[2]\r\n node_parent = current_node[1]\r\n minx = 0\r\n miny = 0\r\n maxy = (len(workspace) - 1)\r\n maxx = (len(workspace[0]) -1)\r\n if node_pos[0] > maxy or node_pos[0] < miny 
or node_pos[1] > maxx or node_pos[1] < minx or workspace[node_pos[0]][node_pos[1]] != 0:\r\n continue\r\n workspace[node_pos[0]][node_pos[1]] = 1\r\n new_node = (node_position_cost,node_pos,node_parent)\r\n heapq.heappush(open_list,(new_node))\r\n\r\n\r\n#Main program\r\nTake_Input()\r\nrobot_radius = float(input(\"Enter radius of the rigid robot:- \"))\r\nclearance = float(input(\"Enter clearance value:- \"))\r\n\r\nx_obs,y_obs,obstacle_space,Final_obs = generate_obstacles(robot_radius, clearance)\r\nif start in (zip(x_obs,y_obs)):\r\n print(\"Start node Invalid or in Obstacle Space\")\r\n\r\nelif goal in (zip(x_obs,y_obs)):\r\n print(\"Goal node Invalid or in Obstacle Space\")\r\n\r\nelse:\r\n Generate_Path = dijkstra_algorithm(start,goal,Final_obs)\r\n plt.plot(plotx,ploty, '.k')\r\n if Generate_Path!= None:\r\n x = [x[0] for x in Generate_Path]\r\n y = [x[1] for x in Generate_Path]\r\n plt.plot(x,y,color = 'c',linewidth = 4)\r\n elapsed_time = time.time() - start_time\r\n print(\"Time elapsed-> \", elapsed_time)\r\n else:\r\n print(\"Path not found\")\r\n","sub_path":"dijkstra_rigid.py","file_name":"dijkstra_rigid.py","file_ext":"py","file_size_in_byte":6901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"192750443","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n***************************************************************************\n NumberInputPanel.py\n ---------------------\n Date : August 2012\n Copyright : (C) 2012 by Victor Olaya\n Email : volayaf at gmail dot com\n***************************************************************************\n* *\n* This program is free software; you can redistribute it and/or modify *\n* it under the terms of the GNU General Public License as published by *\n* the Free Software Foundation; either version 2 of the License, or *\n* (at your option) any later version. 
*\n* *\n***************************************************************************\n\"\"\"\n\n__author__ = 'Victor Olaya'\n__date__ = 'August 2012'\n__copyright__ = '(C) 2012, Victor Olaya'\n\n# This will get replaced with a git SHA1 when you do a git archive\n\n__revision__ = '$Format:%H$'\n\nimport os\n\nfrom PyQt4 import uic\nfrom PyQt4.QtCore import pyqtSignal\n\nfrom math import log10, floor\nfrom processing.gui.NumberInputDialog import NumberInputDialog\n\npluginPath = os.path.split(os.path.dirname(__file__))[0]\nWIDGET, BASE = uic.loadUiType(\n os.path.join(pluginPath, 'ui', 'widgetNumberSelector.ui'))\n\n\nclass NumberInputPanel(BASE, WIDGET):\n\n hasChanged = pyqtSignal()\n\n def __init__(self, number, minimum, maximum, isInteger):\n super(NumberInputPanel, self).__init__(None)\n self.setupUi(self)\n\n self.isInteger = isInteger\n if self.isInteger:\n self.spnValue.setDecimals(0)\n else:\n #Guess reasonable step value\n if (maximum == 0 or maximum) and (minimum == 0 or minimum):\n self.spnValue.setSingleStep(self.calculateStep(minimum, maximum))\n\n if maximum == 0 or maximum:\n self.spnValue.setMaximum(maximum)\n else:\n self.spnValue.setMaximum(99999999)\n if minimum == 0 or minimum:\n self.spnValue.setMinimum(minimum)\n else:\n self.spnValue.setMinimum(-99999999)\n\n #Set default value\n if number == 0 or number:\n self.spnValue.setValue(float(number))\n self.spnValue.setClearValue(float(number))\n elif minimum == 0 or minimum:\n self.spnValue.setValue(float(minimum))\n self.spnValue.setClearValue(float(minimum))\n else:\n self.spnValue.setValue(0)\n self.spnValue.setClearValue(0)\n\n self.btnCalc.clicked.connect(self.showNumberInputDialog)\n\n self.spnValue.valueChanged.connect(lambda: self.hasChanged.emit())\n\n def showNumberInputDialog(self):\n dlg = NumberInputDialog(self.isInteger)\n dlg.exec_()\n if dlg.value is not None:\n self.spnValue.setValue(dlg.value)\n\n def getValue(self):\n return self.spnValue.value()\n\n def calculateStep(self, minimum, maximum):\n valueRange = maximum - minimum\n if valueRange <= 1.0:\n step = valueRange / 10.0\n # round to 1 significant figure\n return round(step, -int(floor(log10(step))))\n else:\n return 1.0\n","sub_path":"processing/gui/NumberInputPanel.py","file_name":"NumberInputPanel.py","file_ext":"py","file_size_in_byte":3322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"493673104","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\n\nimport mock\n\nfrom ironic_inspector import node_cache\nfrom ironic_inspector.plugins import extra_hardware\nfrom ironic_inspector.test import base as test_base\n\n\n@mock.patch.object(extra_hardware.swift, 'SwiftAPI', autospec=True)\n@mock.patch.object(node_cache.NodeInfo, 'patch')\nclass TestExtraHardware(test_base.NodeTest):\n hook = extra_hardware.ExtraHardwareHook()\n\n def test_data_recieved(self, patch_mock, swift_mock):\n introspection_data = {\n 'data': [['memory', 'total', 'size', '4294967296'],\n ['cpu', 
'physical', 'number', '1'],\n ['cpu', 'logical', 'number', '1']]}\n data = json.dumps(introspection_data['data'])\n self.hook.before_processing(introspection_data)\n self.hook.before_update(introspection_data, self.node_info)\n\n swift_conn = swift_mock.return_value\n name = 'extra_hardware-%s' % self.uuid\n swift_conn.create_object.assert_called_once_with(name, data)\n patch_mock.assert_called_once_with(\n [{'op': 'add', 'path': '/extra/hardware_swift_object',\n 'value': name}])\n\n expected = {\n 'memory': {\n 'total': {\n 'size': 4294967296\n }\n },\n 'cpu': {\n 'physical': {\n 'number': 1\n },\n 'logical': {\n 'number': 1\n },\n }\n }\n\n self.assertEqual(expected, introspection_data['extra'])\n\n def test_data_not_in_edeploy_format(self, patch_mock, swift_mock):\n introspection_data = {\n 'data': [['memory', 'total', 'size', '4294967296'],\n ['cpu', 'physical', 'number', '1'],\n {'interface': 'eth1'}]}\n data = json.dumps(introspection_data['data'])\n self.hook.before_processing(introspection_data)\n self.hook.before_update(introspection_data, self.node_info)\n\n swift_conn = swift_mock.return_value\n name = 'extra_hardware-%s' % self.uuid\n swift_conn.create_object.assert_called_once_with(name, data)\n patch_mock.assert_called_once_with(\n [{'op': 'add', 'path': '/extra/hardware_swift_object',\n 'value': name}])\n\n self.assertNotIn('data', introspection_data)\n\n def test_no_data_recieved(self, patch_mock, swift_mock):\n introspection_data = {'cats': 'meow'}\n swift_conn = swift_mock.return_value\n self.hook.before_processing(introspection_data)\n self.hook.before_update(introspection_data, self.node_info)\n self.assertFalse(patch_mock.called)\n self.assertFalse(swift_conn.create_object.called)\n\n def test__convert_edeploy_data(self, patch_mock, swift_mock):\n introspection_data = [['Sheldon', 'J.', 'Plankton', '123'],\n ['Larry', 'the', 'Lobster', None],\n ['Eugene', 'H.', 'Krabs', 'The cashier']]\n\n data = self.hook._convert_edeploy_data(introspection_data)\n expected_data = {'Sheldon': {'J.': {'Plankton': 123}},\n 'Larry': {'the': {'Lobster': None}},\n 'Eugene': {'H.': {'Krabs': 'The cashier'}}}\n self.assertEqual(expected_data, data)\n","sub_path":"ironic_inspector/test/unit/test_plugins_extra_hardware.py","file_name":"test_plugins_extra_hardware.py","file_ext":"py","file_size_in_byte":3856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"257124111","text":"\ndef solve(line):\n audience = line.split()[1]\n required = 0\n current = 0\n for i in xrange(len(audience)):\n if audience[i] == 0:\n continue\n if current >= i:\n current += int(audience[i])\n else:\n required += i - current\n current += i - current + int(audience[i])\n return required\n\n\nif __name__ == \"__main__\":\n testcases = input()\n\n for case in xrange(1, testcases+1):\n line = raw_input()\n print(\"Case #%i: %s\" % (case, solve(line)))\n","sub_path":"solutions_python/Problem_155/999.py","file_name":"999.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"391542185","text":"import math\nimport seamonsters as sea\n\nclass PathFollower:\n \"\"\"\n Controls a SuperHolonomicDrive to follow paths on the field.\n \"\"\"\n\n NAVX_LAG = 7 # frames\n NAVX_ERROR_CORRECTION = 0.1 # out of 1\n\n def __init__(self, drive, ahrs=None):\n \"\"\"\n :param drive: a SuperHolonomicDrive\n :param x: starting x position of the robot, in feet\n :param y: starting y position of the 
robot, in feet\n :param angle: starting angle of the robot, radians.\n 0 means the robot's local XY coordinates line up with the field XY\n coordinates.\n :param ahrs: an optional AHRS (NavX) instance. If provided, this will\n be used to track the robot's rotation; if not, the rotation will\n be calculated based on the movement of the motors.\n \"\"\"\n self.drive = drive\n self._drivePositionState = None\n self.ahrs = ahrs\n self._ahrsOrigin = 0\n if ahrs is not None:\n self._ahrsOrigin = self._getAHRSAngle()\n self.robotX = 0\n self.robotY = 0\n self.robotAngle = 0\n\n self._robotAngleHistory = []\n\n def setPosition(self, x, y, angle):\n self.robotX = x\n self.robotY = y\n if angle is not None:\n self.robotAngle = angle\n if self.ahrs is not None:\n self._ahrsOrigin = 0\n self._ahrsOrigin = self._getAHRSAngle() - angle\n self._robotAngleHistory.clear()\n self._drivePositionState = None\n\n def _getAHRSAngle(self):\n return -math.radians(self.ahrs.getAngle()) - self._ahrsOrigin\n\n def waitForOrientWheelsGenerator(self, magnitude, direction, turn):\n \"\"\"\n Orient wheels to prepare to drive with the given mag/dir/turn.\n \"\"\"\n if magnitude == 0 and turn == 0:\n return\n for _ in range(0, 10):\n self.drive.orientWheels(magnitude, direction, turn)\n yield\n\n def updateRobotPosition(self):\n moveDist, moveDir, moveTurn, self._drivePositionState = \\\n self.drive.getRobotPositionOffset(self._drivePositionState, target=True)\n\n self.robotAngle += moveTurn\n self._robotAngleHistory.append(self.robotAngle)\n # pretty sure this isn't off by 1\n if len(self._robotAngleHistory) >= PathFollower.NAVX_LAG:\n laggedAngle = self._robotAngleHistory.pop(0)\n if self.ahrs is not None:\n navxAngle = self._getAHRSAngle()\n error = (navxAngle - laggedAngle) * PathFollower.NAVX_ERROR_CORRECTION\n self.robotAngle += error\n for i in range(0, len(self._robotAngleHistory)):\n self._robotAngleHistory[i] += error\n\n self.robotX += math.cos(moveDir + self.robotAngle) * moveDist\n self.robotY += math.sin(moveDir + self.robotAngle) * moveDist\n\n def driveToPointGenerator(self, x, y, angle, time,\n robotPositionTolerance=0, robotAngleTolerance=0):\n \"\"\"\n A generator to drive to a location on the field while simultaneously\n pointing the robot in a new direction. This will attempt to move the\n robot at a velocity so it reaches the target position angle in ``time``\n seconds. 
This generator never exits, but yields ``True`` or ``False``\n if the robot is close enough to its target position, within tolerance.\n\n If ``time`` is zero, the robot will attempt to move to the position as\n fast as possible.\n\n Position mode is recommended!\n \"\"\"\n dist, moveDir = self._robotVectorToPoint(x, y)\n aDiff = angle - self.robotAngle\n # actual velocities don't matter for orientWheels as long as the ratios\n # are correct\n yield from self.waitForOrientWheelsGenerator(dist, moveDir, aDiff)\n for wheel in self.drive.wheels:\n wheel.resetPosition()\n\n if dist < 0.1: # TODO: constant\n dist = 0\n if abs(aDiff) < math.radians(1): # TODO\n aDiff = 0\n targetMag = 0\n targetAVel = 0\n if time != 0:\n targetMag = dist / time\n targetAVel = aDiff / time\n\n accel = 0\n while True:\n accel += 0.1\n if accel > 1:\n accel = 1\n\n self.updateRobotPosition()\n\n dist, dir = self._robotVectorToPoint(x, y)\n aDiff = angle - self.robotAngle\n\n # is the robot close enough to the target position to reach it in\n # the next iteration?\n atPosition = targetMag == 0 or dist < targetMag / sea.ITERATIONS_PER_SECOND\n if atPosition:\n mag = dist * sea.ITERATIONS_PER_SECOND\n else:\n mag = targetMag\n atAngle = targetAVel == 0 or abs(aDiff) < abs(targetAVel / sea.ITERATIONS_PER_SECOND)\n if atAngle:\n aVel = aDiff * sea.ITERATIONS_PER_SECOND\n else:\n aVel = abs(targetAVel)\n if aDiff < 0:\n aVel = -aVel\n\n self.drive.drive(mag * accel, dir, aVel * accel)\n yield (atPosition or dist <= robotPositionTolerance) \\\n and (atAngle or abs(aDiff) <= robotAngleTolerance)\n\n # return magnitude, direction\n def _robotVectorToPoint(self, x, y):\n xDiff = x - self.robotX\n yDiff = y - self.robotY\n return (math.sqrt(xDiff ** 2 + yDiff ** 2),\n math.atan2(yDiff, xDiff) - self.robotAngle)\n\n def _readDataLine(self, line):\n return (float(n) for n in line)\n\n def followPathData(self, data):\n \"\"\"\n Follow path data read from a file. 
``data`` should be a list of line\n tuples returned by ``sea.readDataFile``.\n \"\"\"\n lastTime, lastX, lastY, lastAngle = self._readDataLine(data[0])\n self.setPosition(lastX, lastY, math.radians(lastAngle))\n for point in data[1:]:\n t, x, y, angle = self._readDataLine(point)\n if lastX == x and lastY == y and lastAngle == angle:\n yield from sea.wait(int((t - lastTime) * sea.ITERATIONS_PER_SECOND))\n else:\n yield from sea.untilTrue(\n self.driveToPointGenerator(x, y, math.radians(angle),\n t - lastTime))\n lastTime = t\n lastX = x\n lastY = y\n lastAngle = angle\n","sub_path":"seamonsters/pathFollower.py","file_name":"pathFollower.py","file_ext":"py","file_size_in_byte":6420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"511680742","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef main():\n drawBoxPlot()\n\n\ndef randomDraw():\n x = np.random.randint(2, size=100)\n ones = np.count_nonzero(x)\n zeros = len(x) - ones\n print(\"ones: \", ones)\n print(\"zeros: \", zeros)\n fig, ax = plt.subplots()\n ax.hist(np.array((zeros, ones)), bins=range(2))\n plt.show()\n print(x[0])\n\n\ndef probDraw():\n x = np.arange(1, 6)\n y = np.array([12, 17, 15, 7, 20])\n # print(np.sort(y))\n # print(np.mean(y))\n fig, ax = plt.subplots(4, 1)\n ax[0].bar(x, y)\n print(y)\n ax[1].hist(y, bins=range(y.min(), y.max() + 1))\n ax[2].hist(np.sort(y), bins=range(y.min(), y.max() + 1))\n ax[3].hist(y, bins=x + np.mean(y))\n # ax[2].plot(x, y)\n plt.show()\n\n\ndef histogramExample():\n x = np.random.randint(2, size=100)\n y = np.arange(0, 101)\n print(x)\n print(y)\n fig, ax = plt.subplots()\n ax.hist(x, bins=y)\n plt.show()\n\ndef drawBoxPlot():\n list = [4,5,7,7,7,8,10,11,11,13,13,14] # [4,5,7,7,7,8,10,11,11,13,13,14]\n fig, ax = plt.subplots()\n ax.boxplot(list, vert=False, patch_artist=True,)\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Other/Prob.py","file_name":"Prob.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"79303233","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#########################################################################\n# File Name: get_pop_res.py\n# Author: vincent\n# mail: esunward@live.com\n# Created Time: 2017-12-09 09:43:08\n#########################################################################\n\nfrom __future__ import print_function\n\nfrom collections import Counter\n\nc = Counter()\nwith open('access.log') as fd:\n for line in fd:\n c[line.split()[6]] += 1\n\npopular_resources = c.most_common(10)\n\nfor url,count in popular_resources:\n print(url, count)\n","sub_path":"PFLSAAO/chapter_4_文本处理/src/get_pop_res.py","file_name":"get_pop_res.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"23324076","text":"from selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.common.exceptions import NoSuchElementException, TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\nPOLL_FREQUENCY = 0.25\n\n\ndef ac_click(driver, element):\n return ActionChains(driver).move_to_element(element).click().perform()\n\n\ndef get_element(driver, selector, raise_error=True, wait_sec=None):\n try:\n if wait_sec:\n return WebDriverWait(driver, 
wait_sec, POLL_FREQUENCY)\\\n .until(EC.presence_of_element_located((By.XPATH, selector)))\n else:\n return driver.find_element_by_xpath(selector)\n\n except (NoSuchElementException, TimeoutException) as e:\n if raise_error:\n raise e\n\n\ndef get_field(driver, field_spec, option=None, field_type='input'):\n \"\"\"\n Try to find input by name attribute or\n corresponding label text\n \"\"\"\n\n input_field = get_element(\n driver,\n f'//{field_type}[@name=\"{field_spec}\"]',\n wait_sec=3,\n raise_error=False\n )\n\n if input_field is None:\n label = get_element(\n driver,\n f'//label[contains(text(), \"{field_spec}\")] | \\\n //*[contains(text(), \"{field_spec}\")]/parent::label',\n raise_error=False\n )\n\n assert label is not None, f'Field for spec \"{field_spec}\" not found'\n\n input_id = label.get_attribute('for')\n input_field = get_element(driver, f'//{field_type}[@id=\"{input_id}\"]')\n\n return input_field\n\n\ndef select_option(driver, field_spec, option):\n get_field(driver, field_spec, field_type='select').click()\n\n\ndef is_recapthca_sent(driver):\n return driver.execute_script('return grecaptcha.getResponse().length > 0')\n\n\ndef wait_recapthca(driver, wait_sec):\n return WebDriverWait(driver, wait_sec).until(is_recapthca_sent)\n","sub_path":"features/steps/helpers/dom_helper.py","file_name":"dom_helper.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"474729608","text":"import os\nimport sys\nimport cv2\nimport time\nimport math\nimport scipy.io as sio\nimport random\nsys.path.append(r\"/home/shushi/projects/3face/face_api/src\")\nfrom common.api_baidu import Baidu\nfrom utils.base import draw_face_rectangle_baidu\n\n\npath = '/home/shushi/Pic/idealtest/idealtest/'\noutput_path = '../result/campare_diff/baidu/'\nbaidu = Baidu()\nif not os.path.exists(output_path):\n os.makedirs(output_path)\nfile_name = output_path+\"compare.txt\"\nresult_file = open(file_name, 'a+')\n\nfile_paths = os.listdir(path)\nnum = 300\nj = 0\nconfidence = 0.0\nwhile j < num:\n ran1 = random.randint(0, 122)\n ran2 = random.randint(0, 122)\n while ran2 == ran1:\n ran2 = random.randint(0, 122)\n ran3 = random.randint(0, 36)\n ran4 = random.randint(0, 36)\n\n file_path1 = path + file_paths[ran1]+\"/\"\n file_path2 = path + file_paths[ran2]+\"/\"\n\n\n files1 = os.listdir(file_path1)\n files2 = os.listdir(file_path2)\n file1 = file_path1+files1[ran3]\n file2 = file_path2+files2[ran4]\n\n result = baidu.compare(file1, file2)\n print(file1,file2)\n try:\n result_num = result[\"result_num\"]\n if result_num != 0:\n faces = result['result'][0]\n buf = file1 + \" vs \" + file2 + \"\\n\" + \"score: \" + str(faces[\"score\"]) + \"\\n\"\n confidence += faces[\"score\"]\n result_file.writelines(buf)\n j += 1\n else:\n buf = file1 + \" vs \" + file2 + \" : no face\" + \"\\n\"\n result_file.writelines(buf)\n except:\n print(\"error\")\n if result[\"error_code\"]==18:\n continue\n else:\n buf = file1 + \" vs \" + file2 + \" \" + result[\"error_code\"] + \"\\n\"\n result_file.writelines(buf)\nprint(\"confidence: \"+str(confidence/num))\nresult_file.flush()\nresult_file.close()\n","sub_path":"src/test_compare_diff/test_baidu.py","file_name":"test_baidu.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"293969829","text":"from uuid import UUID\nfrom datetime import datetime\n\nimport pytest\nfrom packaging import 
version\n\nfrom py_balancer_manager import BalancerManager, BalancerManagerError, Client, Cluster, MultipleExceptions, Route\nfrom py_balancer_manager.helpers import TrafficData\nfrom py_balancer_manager.status import Statuses, Status\n\n\ndef test_properties(balancer_manager):\n assert type(balancer_manager.client) is Client\n assert type(balancer_manager.date) is datetime\n assert isinstance(balancer_manager.httpd_version, version._BaseVersion)\n assert type(balancer_manager.httpd_built_date) is datetime\n assert isinstance(balancer_manager.openssl_version, version._BaseVersion)\n assert type(balancer_manager.holistic_error_status) is bool\n\n for cluster in balancer_manager.clusters:\n assert type(cluster.balancer_manager) is BalancerManager\n assert cluster.max_members is None or type(cluster.max_members) == int\n assert cluster.max_members_used is None or type(cluster.max_members_used) == int\n assert type(cluster.sticky_session) is str or cluster.sticky_session is False\n assert cluster.disable_failover is None or type(cluster.disable_failover) == bool\n assert type(cluster.timeout) is int\n assert type(cluster.failover_attempts) == int\n assert type(cluster.method) is str\n assert cluster.path is None or type(cluster.path) == str\n assert cluster.active is None or type(cluster.active) == bool\n\n for route in cluster.routes:\n assert type(route) is Route\n assert type(route.cluster) is Cluster\n assert type(route.worker) is str\n assert type(route.name) is str\n assert type(route.priority) is int\n assert type(route.route_redir) is str\n assert type(route.factor) is float\n assert type(route.lbset) is int\n assert type(route.elected) is int\n assert route.busy is None or type(route.busy) is int\n assert route.load is None or type(route.load) is int\n assert type(route.traffic_to) is TrafficData\n assert type(route.traffic_from) is TrafficData\n assert type(route.session_nonce_uuid) is UUID\n assert type(route._status) is Statuses\n assert type(route._status.ok) is Status\n assert type(route._status.error) is Status\n for status_name in route.mutable_statuses():\n assert type(route.status(status_name)) is Status\n\n\n@pytest.mark.asyncio\nasync def test_properties_without_lxml(balancer_manager):\n _saved_use_lxml = balancer_manager.client.use_lxml\n try:\n balancer_manager.client.use_lxml = False\n async with balancer_manager:\n await balancer_manager.update()\n test_properties(balancer_manager)\n finally:\n balancer_manager.client.use_lxml = _saved_use_lxml\n\n\ndef test_version(balancer_manager, httpd_version):\n assert balancer_manager.httpd_version == httpd_version\n\n\n@pytest.mark.asyncio\nasync def test_date(balancer_manager):\n \"\"\" confirm the date attribute is updated \"\"\"\n first_datetime = balancer_manager.date\n async with balancer_manager.client:\n await balancer_manager.update()\n last_datetime = balancer_manager.date\n assert first_datetime < last_datetime\n\n\n@pytest.mark.asyncio\nasync def test_cluster_does_not_exist(balancer_manager):\n with pytest.raises(BalancerManagerError) as excinfo:\n balancer_manager.cluster('does_not_exist')\n assert 'could not locate cluster name in list of clusters: does_not_exist' in str(excinfo.value)\n\n\n@pytest.mark.asyncio\nasync def test_route_status_changes(balancer_manager):\n async with balancer_manager.client:\n route = balancer_manager.cluster('cluster0').route('route00')\n for status_name in route.mutable_statuses():\n status_value = route.status(status_name).value\n\n # toggle status to the oposite value\n kwargs = 
{status_name: not status_value}\n\n # continue with route testing\n await route.edit(**kwargs)\n\n # assert new status value\n assert route.status(status_name).value is not status_value\n\n # toggle status back to original value\n await route.edit(**{\n 'force': True,\n status_name: status_value\n })\n\n # assert original status value\n assert route.status(status_name).value is status_value\n\n\n@pytest.mark.asyncio\nasync def test_cluster_lbsets(httpd_instance, balancer_manager):\n async with balancer_manager.client:\n cluster = balancer_manager.cluster('cluster4')\n lbsets = cluster.lbsets()\n assert len(lbsets) == 2\n assert len(lbsets[0]) == 5\n assert len(lbsets[1]) == 5\n\n assert cluster.active_lbset == 0\n\n # test bad lbset number\n with pytest.raises(BalancerManagerError) as excinfo:\n cluster.lbset(99)\n assert 'lbset does not exist: 99' in str(excinfo.value)\n\n # verify before change\n for route in cluster.routes:\n assert route.status('disabled').value is False\n\n # do change\n await cluster.edit_lbset(1, disabled=True)\n # verify after change\n for route in cluster.lbset(1):\n assert route.status('disabled').value is True\n # verify active lbset\n assert cluster.active_lbset == 0\n\n # do change\n await cluster.edit_lbset(1, disabled=False)\n # verify after change\n for route in cluster.lbset(1):\n assert route.status('disabled').value is False\n # verify active lbset\n assert cluster.active_lbset == 0\n\n # do change\n await cluster.edit_lbset(0, disabled=True)\n # verify after change\n for route in cluster.lbset(0):\n assert route.status('disabled').value is True\n # verify active lbset\n assert cluster.active_lbset == 1\n\n # test an enforce that throws exceptions\n with pytest.raises(MultipleExceptions):\n try:\n httpd_instance.container.pause()\n await cluster.edit_lbset(1, disabled=True)\n finally:\n httpd_instance.container.unpause()\n\n\n@pytest.mark.asyncio\nasync def test_taking_traffic(balancer_manager):\n cluster = balancer_manager.cluster('cluster2')\n\n assert cluster.route('route20').taking_traffic is True\n assert cluster.route('route21').taking_traffic is True\n assert cluster.route('route22').taking_traffic is False\n assert cluster.route('route23').taking_traffic is False\n\n async with balancer_manager.client:\n await cluster.route('route20').edit(disabled=True, hot_standby=True)\n\n assert cluster.route('route20').taking_traffic is False\n assert cluster.route('route21').taking_traffic is True\n assert cluster.route('route22').taking_traffic is False\n assert cluster.route('route23').taking_traffic is False\n\n\n@pytest.mark.asyncio\nasync def test_route_disable_last(balancer_manager):\n async with balancer_manager.client:\n cluster = balancer_manager.cluster('cluster3')\n try:\n with pytest.raises(BalancerManagerError) as excinfo:\n for route in cluster.routes:\n await route.edit(disabled=True)\n assert 'cannot enable the \"disabled\" status for the last available route' in str(excinfo.value)\n finally:\n for route in cluster.routes:\n await route.edit(disabled=False)\n\n try:\n for route in cluster.routes:\n await route.edit(force=True, disabled=True)\n finally:\n for route in cluster.routes:\n await route.edit(disabled=False)\n\n\n@pytest.mark.asyncio\nasync def test_standby_activated(balancer_manager):\n async with balancer_manager.client:\n cluster = balancer_manager.cluster('cluster2')\n\n for route in cluster.routes:\n await route.edit(disabled=False)\n\n #assert cluster.standby_activated is False\n await 
cluster.route('route20').edit(disabled=True)\n        await cluster.route('route21').edit(disabled=True)\n        #assert cluster.standby_activated is True\n","sub_path":"test/test_balancer_manager.py","file_name":"test_balancer_manager.py","file_ext":"py","file_size_in_byte":8262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"405503547","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 27 21:30:47 2018\n\n@author: lulu\n\"\"\"\n\n\"\"\"\nPrint all the narcissistic numbers between 1 and 1000. A narcissistic number is a three-digit number in which the cubes of the digits sum to the number itself, e.g. 153 is a narcissistic number.\n\"\"\"\nnum = [] #use an empty list to store the results\nfor i in range(100,1001):\n    l = list(str(i)) #convert the string to a list\n    numbers = [int(x) for x in l] #convert the list elements to integers\n    #print(numbers)\n    \n#print(numbers)\n    \n    if (int(numbers[0]) ** 3) + (int(numbers[1]) ** 3) + int((numbers[2] ** 3)) == i:\n        print(i,'is a narcissistic number')\n\n    \n    \n\n    ","sub_path":"python学习基础及简单实列/找出‘水仙花数’.py","file_name":"找出‘水仙花数’.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"293969829","text":"# coding=utf-8\n\"\"\"\nProcess the raw data from the ABC (Agricultural Bank of China) website: add province/city/district IDs; some districts have not been matched yet\n\"\"\"\nfrom utils import DaoUtils\n\n__author__ = 'lyj'\n\n\nclass AbcAddPCD(object):\n    __DB_TABLE_MAIN = 'db_bank_bmp'\n\n    def __init__(self):\n        self.dao = DaoUtils.DaoUtils()\n        self.daoTemplate = DaoUtils.DaoTemplate\n\n        # initialize the data structures\n        self.original_bmp_list = list()\n        self.effective_bmp_list = list()\n        self.province_set = set()\n        self.city_set = set()\n        self.db_region_1 = dict()\n        self.db_region_2 = dict()\n        self.db_region_3 = dict()\n\n        # load the database data into memory\n        self.init_data()\n\n    def __del__(self):\n        pass\n\n    def init_data(self):\n        bmp_original_arr_sql = 'select keyword, city_name, province_name, district_name, bmp_name from bank_bmp where host_url = \\'http://app.abchina.com/\\';'\n        province_arr_sql = 'select distinct province_name from bank_bmp where host_url = \\'http://app.abchina.com/\\';'\n        city_arr_sql = 'select distinct city_name from bank_bmp where host_url = \\'http://app.abchina.com/\\';'\n        region_1_sql = 'select region_id, region_name from db_region where region_type = 1;'\n        region_2_sql = 'select region_id, region_name from db_region where region_type = 2;'\n        region_3_sql = 'select region_id, region_name, parent_id from db_region where region_type = 3;'\n\n        for row in self.dao.db.get_all_row(bmp_original_arr_sql):\n            bmp_original = {\n                'keyword': row['keyword'],\n                'city_name': row['city_name'],\n                'province_name': row['province_name'],\n                'district_name': row['district_name'],\n                'bmp_name': row['bmp_name']\n            }\n            self.original_bmp_list.append(bmp_original)\n\n        for row in self.dao.db.get_all_row(province_arr_sql):\n            self.province_set.add(row['province_name'])\n\n        for row in self.dao.db.get_all_row(city_arr_sql):\n            self.city_set.add(row['city_name'])\n\n        for row in self.dao.db.get_all_row(region_1_sql):\n            self.db_region_1[row['region_name']] = row['region_id']\n\n        for row in self.dao.db.get_all_row(region_2_sql):\n            self.db_region_2[row['region_name']] = row['region_id']\n\n        for row in self.dao.db.get_all_row(region_3_sql):\n            self.db_region_3[str(str(row['parent_id']) + row['region_name'])] = row['region_id']\n\n    def handle(self):\n        # add province IDs\n        for province in self.province_set:\n            province_id = self.db_region_1[province]\n            for original_bmp in self.get_original_bmp_by_province(province):\n                original_bmp['province_id'] = province_id\n\n        # add city IDs\n        for city in self.city_set:\n            flag = 1\n            city_id = self.get_region_city_id(str(city))\n            for 
original_bmp in self.get_original_bmp_by_city(city):\n                if flag == 1 and city_id == -1:\n                    flag = 0\n                    city_id = self.get_region_city_id(str(original_bmp['district_name']))\n                original_bmp['city_id'] = city_id\n\n        # add district IDs\n        for original_bmp in self.original_bmp_list:\n            city_id = original_bmp['city_id']\n            district_name = original_bmp['district_name']\n            original_bmp['district_id'] = self.db_region_3[str(str(city_id) + district_name)] if str(str(city_id) + district_name) in self.db_region_3.keys() else 0\n\n        # extract the required attributes from the raw data and convert them into database records\n        self.original_to_effective()\n\n        # insert into the database\n        self.dao.insert_batch_temple(AbcAddPCD.__DB_TABLE_MAIN, self.effective_bmp_list)\n\n    def get_original_bmp_by_province(self, province_name):\n        result_list = list()\n        for original_bmp in self.original_bmp_list:\n            if province_name == original_bmp['province_name']:\n                result_list.append(original_bmp)\n        return result_list\n\n    def get_original_bmp_by_city(self, city_name):\n        result_list = list()\n        for original_bmp in self.original_bmp_list:\n            if str(original_bmp['city_name']).startswith(city_name):\n                result_list.append(original_bmp)\n        return result_list\n\n    def get_region_city_id(self, city_name):\n        if city_name == '襄樊市':\n            return 193\n        if city_name == '巢湖市':\n            return 3401\n        for key, value in self.db_region_2.items():\n            if city_name.startswith(key):\n                return value\n        return -1\n\n    def original_to_effective(self):\n        for original_bmp in self.original_bmp_list:\n            obj = {\n                'keyword': original_bmp['keyword'],\n                'bmp_name': original_bmp['bmp_name'],\n                'province_id': original_bmp['province_id'],\n                'city_id': original_bmp['city_id'],\n                'district_id': original_bmp['district_id']\n            }\n            self.effective_bmp_list.append(obj)\n\n\nif __name__ == '__main__':\n    AbcAddPCD = AbcAddPCD()\n    AbcAddPCD.handle()\n","sub_path":"bank/ow/AbcAddPCD.py","file_name":"AbcAddPCD.py","file_ext":"py","file_size_in_byte":5139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"100721032","text":"# =============================================================================\n# Copyright 2020 NVIDIA. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\n# =============================================================================\n# Copyright 2019 Salesforce Research.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom\n# the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR\n# THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n# =============================================================================\n\nimport torch\n\nfrom nemo.backends.pytorch.nm import LossNM\nfrom nemo.core.neural_types import ChannelType, LabelsType, LengthsType, LogitsType, LossType, NeuralType\n\n__all__ = ['TRADEMaskedCrossEntropy', 'CrossEntropyLoss3D']\n\n\nclass TRADEMaskedCrossEntropy(LossNM):\n \"\"\"\n Neural module which implements a cross entropy for trade model with masking feature.\n\n Args:\n logits (float): output of the classifier\n targets (long): ground truth targets\n loss_mask (long): specifies the ones to get ignored in loss calculation\n\n\n \"\"\"\n\n @property\n def input_ports(self):\n \"\"\"Returns definitions of module input ports.\n\n logits: 4d tensor of logits\n\n targets: 3d tensor of labels\n\n loss_mask: specifies the words to be considered in the loss calculation\n\n \"\"\"\n return {\n # \"logits\": NeuralType(\n # {0: AxisType(BatchTag), 1: AxisType(TimeTag), 2: AxisType(ChannelTag), 3: AxisType(ChannelTag)}\n # ),\n # \"targets\": NeuralType({0: AxisType(BatchTag), 1: AxisType(ChannelTag), 2: AxisType(TimeTag)}),\n # \"loss_mask\": NeuralType({0: AxisType(BatchTag), 1: AxisType(ChannelTag)}),\n \"logits\": NeuralType(('B', 'T', 'D', 'D'), LogitsType()),\n \"targets\": NeuralType(('B', 'D', 'T'), LabelsType()),\n \"loss_mask\": NeuralType(('B', 'D'), LengthsType()),\n }\n\n @property\n def output_ports(self):\n \"\"\"Returns definitions of module output ports.\n \"\"\"\n # return {\"loss\": NeuralType(None)}\n return {\"loss\": NeuralType(elements_type=LossType())}\n\n def __init__(self):\n LossNM.__init__(self)\n\n def _loss_function(self, logits, targets, loss_mask):\n 
logits_flat = logits.view(-1, logits.size(-1))\n eps = 1e-10\n log_probs_flat = torch.log(torch.clamp(logits_flat, min=eps))\n target_flat = targets.view(-1, 1)\n losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat)\n losses = losses_flat.view(*targets.size())\n loss = self.masking(losses, loss_mask)\n return loss\n\n @staticmethod\n def masking(losses, mask):\n max_len = losses.size(2)\n\n mask_ = torch.arange(max_len, device=mask.device)[None, None, :] < mask[:, :, None]\n mask_ = mask_.float()\n losses = losses * mask_\n loss = losses.sum() / mask_.sum()\n return loss\n\n\nclass CrossEntropyLoss3D(LossNM):\n \"\"\"\n Neural module which implements a cross entropy loss for 3d logits.\n Args:\n num_classes (int): number of classes in a classifier, e.g. size\n of the vocabulary in language modeling objective\n logits (float): output of the classifier\n labels (long): ground truth labels\n \"\"\"\n\n @property\n def input_ports(self):\n \"\"\"Returns definitions of module input ports.\n \"\"\"\n return {\n # \"logits\": NeuralType({0: AxisType(BatchTag), 1: AxisType(ChannelTag), 2: AxisType(ChannelTag)}),\n # \"labels\": NeuralType({0: AxisType(BatchTag), 1: AxisType(ChannelTag)}),\n \"logits\": NeuralType(('B', 'D', 'D'), LogitsType()),\n \"labels\": NeuralType(('B', 'D'), LabelsType()),\n }\n\n @property\n def output_ports(self):\n \"\"\"Returns definitions of module output ports.\n \"\"\"\n # return {\"loss\": NeuralType(None)}\n return {\"loss\": NeuralType(elements_type=LossType())}\n\n def __init__(self, num_classes, **kwargs):\n LossNM.__init__(self, **kwargs)\n self._criterion = torch.nn.CrossEntropyLoss()\n self.num_classes = num_classes\n\n def _loss_function(self, logits, labels):\n logits_flatten = logits.view(-1, self.num_classes)\n labels_flatten = labels.view(-1)\n\n loss = self._criterion(logits_flatten, labels_flatten)\n return loss\n","sub_path":"nemo/collections/nlp/nm/losses/state_tracking_trade_loss.py","file_name":"state_tracking_trade_loss.py","file_ext":"py","file_size_in_byte":5880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"276286641","text":"import sys\n\n\ndef filter_files(name, func):\n with open(name) as in_file, open(name + '.out', 'w') as out_file:\n for line in in_file:\n out_file.write(func(line))\n\n\ndef filter_stream(func):\n for line in sys.stdin:\n print(func(line), end='')\n\n\nif __name__ == '__main__':\n filter_stream(lambda line: line)","sub_path":"dev/SystemProg/Filetools/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"67277389","text":"import sys\r\nimport sorter\r\n\r\n# Best Case:\tO(infinity)\r\n# Average Case:\tO(infinity)\r\n# Worst Case:\tO(infinity)\r\n# Worse Space: \tO(0)\r\n# Stable?\r\n# In-place?\r\n\r\ndef miracleSort(data):\r\n\t\"\"\" checks to see if the data is sorted \"\"\"\r\n\r\n\r\n\twhile True:\r\n\r\n\t\tisSorted = True\r\n\r\n\t\tfor i in range(len(data) - 1):\r\n\t\t\tif(data[i] > data[i + 1]):\r\n\t\t\t\tisSorted = False\r\n\r\n\t\tif(isSorted):\r\n\t\t\tbreak\r\n\r\n\treturn data\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tmiracleSorter = sorter.Sorter(sys.argv[1], miracleSort)\r\n\tmiracleSorter.run()","sub_path":"algorithms/sorting/miraclesort.py","file_name":"miraclesort.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
+{"seq_id":"253395141","text":"import machine\nimport time\nfrom bme680 import BME680\nfrom io_pubsub import IOClient\n\nadc = machine.ADC(machine.Pin(35))\nadc.atten(machine.ADC.ATTN_11DB) # provides full range of 0-4095\n\nled = machine.Pin(22, machine.Pin.OUT) # LED on the board\nled.value(0)\nbme680 = BME680()\nio = IOClient()\n\nwhile True:\n battery = (adc.read() * 2 * 3.3) / 4096\n print(\"Battery: %d mV\", battery)\n\n io.update(bme680.temperature, bme680.humidity, bme680.pressure, bme680.gas, battery)\n io.publish()\n time.sleep(60)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"350876826","text":"\n\n\nimport sys, os\nsys.path.insert(0, os.path.abspath('.'))\n\nfrom os.path import expanduser\nhome = expanduser(\"~\")\n\nimport numpy as np\nimport math\nimport pickle\nimport random\nimport subprocess\nimport json\nimport random\nimport shutil\nimport time\nimport argparse\nfrom collections import deque\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\n\n\n\n\n\ndef f_vec(x, n_bins, bin_values, tail_bound, inverse=False):\n # x: [B,X]\n \n # Ignore the ones outside the interval\n inside_interval_mask = (x >= -tail_bound) & (x <= tail_bound)\n outside_interval_mask = ~inside_interval_mask\n outputs = np.zeros_like(x)\n outputs[outside_interval_mask] = x[outside_interval_mask]\n \n #Scale to [0,1]\n lower_tailbound = -tail_bound\n upper_tailbound = tail_bound\n inputs_inside_range = np.reshape(x[inside_interval_mask], [-1,1])\n x = (inputs_inside_range - lower_tailbound) / (upper_tailbound - lower_tailbound)\n B = x.shape[0]\n\n bin_intervals = np.reshape(np.linspace(0, 1, n_bins+1), [1,n_bins+1]) # [1,n_bins+1]\n cdf = np.reshape(np.cumsum(bin_values, -1),[1,n_bins+1])\n\n\n if inverse:\n\n #Figure out which bin x is in\n cdf_lower_than_x = (cdf < x).astype(float) # [B,n_bins+1]\n x_bin_idx = np.reshape((np.sum(cdf_lower_than_x, 1)-1).astype(int), [B,1]) #[B,1]\n\n # Reverse the output computation\n lower_bin_cdf = np.reshape(cdf[:,x_bin_idx], [B,1]) # [B,1]\n pos_in_bin = x - lower_bin_cdf\n pos_in_bin = pos_in_bin / bin_values[x_bin_idx+1]\n\n # Reverse the within bin computation\n bin_x_below = np.reshape(bin_intervals[:,x_bin_idx], [B,1]) # [B,1]\n bin_x_above = np.reshape(bin_intervals[:,x_bin_idx+1], [B,1]) # [B,1]\n output = pos_in_bin * (bin_x_above - bin_x_below) + bin_x_below\n\n\n\n else:\n\n #Figure out which bin x is in\n bins_lower_than_x = (bin_intervals < x).astype(float) # [B,n_bins+1] \n x_bin_idx = np.reshape((np.sum(bins_lower_than_x, 1)-1).astype(int), [B,1]) #[B,1]\n\n # Get position in bin\n bin_x_below = np.reshape(bin_intervals[:,x_bin_idx], [B,1]) # [B,1]\n bin_x_above = np.reshape(bin_intervals[:,x_bin_idx+1], [B,1]) # [B,1]\n pos_in_bin = (x- bin_x_below) / (bin_x_above - bin_x_below) # [B,1]\n\n # Compute output\n lower_sum = np.reshape(cdf[:,x_bin_idx], [B,1]) # [B,1]\n within_bin_sum = pos_in_bin * bin_values[x_bin_idx+1]\n output = lower_sum + within_bin_sum\n\n\n\n # Rescale back to tail bound space\n output = output * (upper_tailbound - lower_tailbound) + lower_tailbound\n output = np.reshape(output, [-1])\n outputs[inside_interval_mask] = output\n\n return outputs\n\n\n\n\n\n# x = np.reshape(np.array([.2, .4]), [2,1])\n\n# y = f_vec(x)\n\n# x = f()\n\n# fasdfa\n\n\n\n# print 
(f(0.0001))\n# # fasdfa\n\n\n# print (f(.3333))\n# # print ( .2)\n# print()\n# print (f(.5))\n# print()\n# print (f(.6666))\n# # print ( .2 + .5)\n# print ()\n# print (f(.9999))\n# # print (.2 + .5 + .3)\n\n\n\nif __name__ == \"__main__\":\n\n\n    save_to_dir = home + \"/Documents/Flow/\"\n    exp_name = 'spline_quad'\n\n\n    exp_dir = save_to_dir + exp_name + '/'\n    params_dir = exp_dir + 'params/'\n    images_dir = exp_dir + 'images/'\n    code_dir = exp_dir + 'code/'\n\n    if not os.path.exists(exp_dir):\n        os.makedirs(exp_dir)\n        print ('Made dir', exp_dir) \n\n    if not os.path.exists(params_dir):\n        os.makedirs(params_dir)\n        print ('Made dir', params_dir) \n\n    if not os.path.exists(images_dir):\n        os.makedirs(images_dir)\n        print ('Made dir', images_dir) \n\n    if not os.path.exists(code_dir):\n        os.makedirs(code_dir)\n        print ('Made dir', code_dir) \n\n\n    #Save args and code\n    # json_path = exp_dir+'args_dict.json'\n    # with open(json_path, 'w') as outfile:\n    #     json.dump(args_dict, outfile, sort_keys=True, indent=4)\n    subprocess.call(\"(rsync -r --exclude=__pycache__/ . \"+code_dir+\" )\", shell=True)\n\n\n    torch.manual_seed(999)\n\n\n\n\n\n    # Transform\n    tail_bound = 1\n    n_bins = 3\n    W = np.array([.2,.5,.3])\n    bin_values = np.array([0,.2,.5,.3]) # bin weights; f_vec below needs this defined\n    V = np.array([2.,5.,3.,1.])\n    Area = np.sum(.5*( np.exp(V[:-1]) + np.exp(V[1:])) * W)\n    V = np.exp(V) / Area\n    # print (list(V))\n    # fadsfa\n\n    W_positions = np.cumsum(W)\n    print (W_positions)\n    # np.interp(.5, W_positions, fp) # leftover experiment: fp was never defined\n\n    x = np.linspace(-tail_bound - .5, tail_bound +.5, 99)\n    x = np.reshape(x, [-1,1])\n\n\n    ys = f_vec(x, n_bins=n_bins, bin_values=bin_values, tail_bound=tail_bound)\n\n    y_inverse = f_vec(x, n_bins=n_bins, bin_values=bin_values, tail_bound=tail_bound, inverse=True)\n\n\n    # dfasad\n\n\n\n    #PLOT\n    rows = 1 \n    cols = 1\n    fig = plt.figure(figsize=(8+cols,4+rows), facecolor='white') #, dpi=150)\n    # xlimits=[-3, 3]\n    # ylimits=[0, .43]\n\n    fontsize = 9\n    \n\n    # p0_mean = torch.tensor([0,0]).float()\n    # p0_logvar = torch.tensor([0.8,0.8]).float()\n\n\n    ax = plt.subplot2grid((rows,cols), (0,0), frameon=False)\n\n\n    ax.plot(x, x, alpha=.3, ls='--')\n\n\n    ax.plot(x, ys, label='f')\n    # ax.set_ylim(ylimits)\n    # ax.set_title('Initial Distribution', fontsize=fontsize)\n\n    # ax = plt.subplot2grid((rows,cols), (0,1), frameon=False)\n    ax.plot(x, y_inverse, label='f inv')\n\n    plt.legend(fontsize=fontsize)\n    plt.gca().set_aspect('equal', adjustable='box') \n\n    plt_path = images_dir+'first.png'\n    plt.savefig(plt_path)\n    print ('saved plot', plt_path)\n    plt.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Flow/Spline/first_quadratic.py","file_name":"first_quadratic.py","file_ext":"py","file_size_in_byte":5598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"243874650","text":"from app import app, db #import the app module and the app variable\nfrom flask import render_template, flash, redirect, url_for, request\nfrom app.forms import LoginForm, RegistrationForm, ProfileUpdateForm, AdminSearchForm, AddNewUserForm, AdminUpdateForm, AdminDeleteForm\nfrom flask_login import current_user, login_user, logout_user, login_required\nfrom app.models import User, UserRoles, Role\nfrom werkzeug.urls import url_parse\nimport datetime\n\n@app.route('/')\n@app.route('/index')\ndef index():\n    return render_template('index.html')\n\n@app.route('/about')\ndef about():\n    return render_template('about.html')\n\n@app.route('/contact')\ndef contact():\n    return render_template('contact.html')\n\n@app.route('/attorneys')\ndef attorneys():\n    return 
render_template('attorneys.html')\n\n@app.route('/what')\ndef what():\n return render_template('what.html')\n\n@app.route('/where')\ndef where():\n return render_template('where.html')\n\n@app.route('/whoweare')\ndef whoweare():\n return render_template('whoweare.html')\n\n@app.route('/portal')\n@login_required\ndef portal():\n return render_template('portal.html')\n\n@app.route('/admin/add_user', methods=['GET', 'POST'])\n@login_required\ndef admin_add():\n # check if user is actually admin\n id = current_user.id\n this_user = UserRoles.query.filter_by(user_id=id).first()\n if this_user.role_id != 7:\n return redirect(url_for('profile'))\n # create forms\n form = AddNewUserForm()\n if form.validate_on_submit():\n user = User(first_name=form.first_name.data, last_name=form.last_name.data, email=form.email.data)\n user.set_password(form.password.data)\n db.session.add(user)\n db.session.commit()\n roles = UserRoles(user_id=User.query.filter_by(email=user.email).first().id, role_id=int(form.role.data))\n db.session.add(roles)\n db.session.commit()\n flash('New user successfully created!')\n return redirect(url_for('admin'))\n\n return render_template('admin_add.html', form=form)\n\n@app.route('/admin', methods=['GET', 'POST'])\n@login_required\ndef admin():\n # check if user is actually admin\n id = current_user.id\n this_user = UserRoles.query.filter_by(user_id=id).first()\n if this_user.role_id != 7:\n return redirect(url_for('profile'))\n #create forms\n search_form = AdminSearchForm()\n update_form = AdminUpdateForm()\n delete_form = AdminDeleteForm()\n if delete_form.submit3.data and delete_form.delete.data=='REMOVE':\n # remove user\n delete_id = delete_form.user_id_delete.data\n UserRoles.query.filter_by(user_id=delete_id).delete()\n User.query.filter_by(id=delete_id).delete()\n db.session.commit()\n flash('User successfully removed.')\n return redirect(url_for('admin'))\n if search_form.submit1.data and search_form.validate():\n if search_form.email.data:\n user = User.query.filter_by(email=search_form.email.data).first()\n if user == None:\n flash('Sorry that email is not in the system.')\n return redirect(url_for('admin'))\n user = UserRoles.query.filter_by(user_id=user.id).first()\n else:\n flash('Sorry that email is not in the system.')\n return redirect(url_for('admin'))\n return render_template('admin.html', update_form=update_form, user=user, user_roles=None, search_form=None, delete_form=delete_form)\n if update_form.submit2.data and update_form.validate():\n user_id = update_form.user_id.data\n # if user id matches email of user trying to be edited, then allow the changes to occur, otherwise, don't and let them know they weren't editing the correct person, or the email already exists.\n user_role = UserRoles.query.filter_by(user_id=user_id).first()\n if user_role.user.email != update_form.email.data:\n # run a query to see if form email has same id as user being edited\n user_exists = User.query.filter_by(email=update_form.email.data).first()\n if user_exists == None:\n user_role.user.email = update_form.email.data\n user_role.role_id = int(update_form.role.data)\n user_role.user.first_name = update_form.first_name.data\n user_role.user.last_name = update_form.last_name.data\n db.session.commit()\n flash('User changes accepted!')\n return redirect(url_for('admin'))\n else:\n flash('A user with that e-mail already exists.')\n return redirect(url_for('admin'))\n else:\n user_role.role_id = int(update_form.role.data)\n user_role.user.first_name = update_form.first_name.data\n 
user_role.user.last_name = update_form.last_name.data\n db.session.commit()\n flash('User changes accepted!')\n return redirect(url_for('admin'))\n user_roles = UserRoles.query.all()\n return render_template('admin.html', user_roles=user_roles, search_form=search_form, update_form=None, user=None, delete_form=None)\n\n# Profile route\n@app.route('/profile', methods = ['GET', 'POST'])\n@login_required\ndef profile():\n form = ProfileUpdateForm()\n user_role = UserRoles.query.filter_by(user_id=current_user.id).first()\n if form.validate_on_submit():\n current_user.first_name = form.first_name.data\n current_user.last_name = form.last_name.data\n db.session.commit()\n flash('Thanks for updating your profile!')\n return redirect(url_for('profile'))\n return render_template('profile.html', user_role=user_role, form=form)\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n if user is None or not user.check_password(form.password.data):\n flash('Incorrect email or password. Please try again!')\n return redirect(url_for('login'))\n login_user(user, remember=form.remember_me.data)\n next_page = request.args.get('next')\n if not next_page or url_parse(next_page).netloc != '':\n next_page = url_for('index')\n return redirect(next_page)\n return render_template('login.html', title=\"Log In\", form=form)\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n form = RegistrationForm()\n if form.validate_on_submit():\n user = User(first_name=form.first_name.data, last_name=form.last_name.data, email=form.email.data)\n user.set_password(form.password.data)\n db.session.add(user)\n db.session.commit()\n if len(User.query.all()) == 1:\n roles = UserRoles(user_id=User.query.filter_by(email=user.email).first().id, role_id=7)\n else:\n roles = UserRoles(user_id=User.query.filter_by(email=user.email).first().id, role_id=5)\n db.session.add(roles)\n db.session.commit()\n flash('Your account has been successfully created!')\n return redirect(url_for('login'))\n return render_template('login.html', title=\"Register\", form=form)\n\n@app.before_request\ndef before_request():\n if current_user.is_authenticated:\n current_user.last_login = datetime.datetime.utcnow()\n current_user.active = True\n db.session.commit()\n\n@app.route('/logout')\ndef logout():\n current_user.active = False\n logout_user()\n return redirect(url_for('login'))\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":7558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"646618036","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport copy\nimport datetime\nimport common.format_\nfrom google.appengine.ext import ndb\nfrom google.appengine.api import search\nfrom google.appengine.ext import deferred\n\n\n# ACHTUNG! 
Neue Models müssen auch in den Backup-Cron-Job eingetragen werden!\n\n\ndef age_years(birthday, basedate = None):\n \"\"\"\n Gibt das Alter in Jahren zurück.\n\n Wird kein Basidatum angegeben, dann wird das Alter zum heutigen Tag\n berechnet.\n \"\"\"\n\n if not basedate:\n basedate = datetime.date.today()\n\n years = basedate.year - birthday.year\n months = basedate.month - birthday.month\n days = basedate.day - birthday.day\n\n if days < 0:\n months -= 1\n\n if months < 0:\n years -= 1\n\n return years\n\n\n# class DateTimePropertySerializable(ndb.DateTimeProperty):\n#\n# def _get_for_dict(self, entity):\n#\n# value = self._get_value(entity)\n#\n# if value:\n# return value.isoformat()\n# else:\n# return value\n#\n# def _validate(self, value):\n# if isinstance(value, basestring):\n# value = common.format_.string_to_datetime(value)\n# ndb.DateTimeProperty._validate(self, value)\n#\n#\n# def _db_set_value(self, v, p, value):\n# if isinstance(value, basestring):\n# value = common.format_.string_to_datetime(value)\n# ndb.DateTimeProperty._db_set_value(self, v, p, value)\n\n\nclass Tel(ndb.Model):\n uid = ndb.StringProperty(required = True)\n label = ndb.StringProperty()\n number = ndb.StringProperty(required = True)\n\n ct = ndb.DateTimeProperty(required = True, verbose_name = u\"creation_timestamp\")\n cu = ndb.StringProperty(required = True, verbose_name = u\"creation_user\")\n et = ndb.DateTimeProperty(required = True, verbose_name = u\"edit_timestamp\")\n eu = ndb.StringProperty(required = True, verbose_name = u\"edit_user\")\n\n\nclass Email(ndb.Model):\n uid = ndb.StringProperty(required = True)\n label = ndb.StringProperty()\n email = ndb.StringProperty(required = True)\n\n ct = ndb.DateTimeProperty(required = True, verbose_name = u\"creation_timestamp\")\n cu = ndb.StringProperty(required = True, verbose_name = u\"creation_user\")\n et = ndb.DateTimeProperty(required = True, verbose_name = u\"edit_timestamp\")\n eu = ndb.StringProperty(required = True, verbose_name = u\"edit_user\")\n\n\nclass Url(ndb.Model):\n uid = ndb.StringProperty(required = True)\n label = ndb.StringProperty()\n url = ndb.StringProperty(required = True)\n\n ct = ndb.DateTimeProperty(required = True, verbose_name = u\"creation_timestamp\")\n cu = ndb.StringProperty(required = True, verbose_name = u\"creation_user\")\n et = ndb.DateTimeProperty(required = True, verbose_name = u\"edit_timestamp\")\n eu = ndb.StringProperty(required = True, verbose_name = u\"edit_user\")\n\n\nclass Note(ndb.Model):\n uid = ndb.StringProperty(required = True)\n text = ndb.TextProperty(required = True)\n\n ct = ndb.DateTimeProperty(required = True, verbose_name = u\"creation_timestamp\")\n cu = ndb.StringProperty(required = True, verbose_name = u\"creation_user\")\n et = ndb.DateTimeProperty(required = True, verbose_name = u\"edit_timestamp\")\n eu = ndb.StringProperty(required = True, verbose_name = u\"edit_user\")\n\n\nclass Agreement(ndb.Model):\n uid = ndb.StringProperty(required = True)\n text = ndb.TextProperty(required = True)\n\n ct = ndb.DateTimeProperty(required = True, verbose_name = u\"creation_timestamp\")\n cu = ndb.StringProperty(required = True, verbose_name = u\"creation_user\")\n et = ndb.DateTimeProperty(required = True, verbose_name = u\"edit_timestamp\")\n eu = ndb.StringProperty(required = True, verbose_name = u\"edit_user\")\n\n\nclass JournalItem(ndb.Model):\n\n uid = ndb.StringProperty(required = True)\n date_time = ndb.DateTimeProperty()\n text = ndb.TextProperty(required = True)\n\n ct = 
ndb.DateTimeProperty(required = True, verbose_name = u\"creation_timestamp\")\n cu = ndb.StringProperty(required = True, verbose_name = u\"creation_user\")\n et = ndb.DateTimeProperty(required = True, verbose_name = u\"edit_timestamp\")\n eu = ndb.StringProperty(required = True, verbose_name = u\"edit_user\")\n\n\n def set_date_time_iso(self, value):\n \"\"\"\n Übernimmt einen ISO-Datumsstring und konvertiert diesen nach DateTime\n \"\"\"\n\n self.date_time = common.format_.string_to_datetime(value)\n\n date_time_iso = property(fget = None, fset = set_date_time_iso)\n\n\nclass Anniversary(ndb.Model):\n uid = ndb.StringProperty(required = True)\n label = ndb.StringProperty(required = True)\n year = ndb.IntegerProperty()\n month = ndb.IntegerProperty(choices = range(1, 13))\n day = ndb.IntegerProperty(choices = range(1, 32))\n\n ct = ndb.DateTimeProperty(required = True, verbose_name = u\"creation_timestamp\")\n cu = ndb.StringProperty(required = True, verbose_name = u\"creation_user\")\n et = ndb.DateTimeProperty(required = True, verbose_name = u\"edit_timestamp\")\n eu = ndb.StringProperty(required = True, verbose_name = u\"edit_user\")\n\n\nclass Address(ndb.Model):\n \"\"\"\n See: https://en.wikipedia.org/wiki/VCard#Properties\n \"\"\"\n\n def get_birthday_iso(self):\n\n \"\"\"\n Returns the birthday date as ISO string, if possible\n \"\"\"\n\n if not self.anniversary_items:\n return\n\n for anniversary_item in self.anniversary_items:\n assert isinstance(anniversary_item, Anniversary)\n if anniversary_item.label and anniversary_item.label.lower() in [\n u\"geburtstag\",\n u\"birthday\"\n ]:\n # Birthday found\n if anniversary_item.year:\n return u\"{year}-{month}-{day}\".format(\n year = anniversary_item.year,\n month = anniversary_item.month,\n day = anniversary_item.day\n )\n else:\n return u\"{month}-{day}\".format(\n month = anniversary_item.month,\n day = anniversary_item.day\n )\n\n\n def get_age(self):\n \"\"\"\n Returns the age if possible\n \"\"\"\n\n if not self.anniversary_items:\n return\n\n for anniversary_item in self.anniversary_items:\n assert isinstance(anniversary_item, Anniversary)\n if anniversary_item.label and anniversary_item.label.lower() in [\n u\"geburtstag\",\n u\"birthday\"\n ]:\n if anniversary_item.year:\n # Birthday found\n birthday = datetime.date(\n year = anniversary_item.year,\n month = anniversary_item.month,\n day = anniversary_item.day\n )\n return age_years(birthday)\n\n\n uid = ndb.StringProperty(required = True)\n owner = ndb.StringProperty(required = True)\n\n ct = ndb.DateTimeProperty(required = True, verbose_name = u\"creation_timestamp\")\n cu = ndb.StringProperty(required = True, verbose_name = u\"creation_user\")\n et = ndb.DateTimeProperty(required = True, verbose_name = u\"edit_timestamp\")\n eu = ndb.StringProperty(required = True, verbose_name = u\"edit_user\")\n dt = ndb.DateTimeProperty(verbose_name = u\"deletion_timestamp\")\n\n # Kind\n kind = ndb.StringProperty(required = True)\n\n # Category Items\n category_items = ndb.StringProperty(repeated = True)\n\n # Tag Items\n tag_items = ndb.StringProperty(repeated = True)\n\n # Organization\n organization = ndb.StringProperty(indexed = False)\n organization_lower = ndb.ComputedProperty(\n lambda self: self.organization.lower() if self.organization else None\n )\n organization_char1 = ndb.ComputedProperty(\n lambda self: self.organization[0].lower() if self.organization else None\n )\n\n # Position\n position = ndb.StringProperty(indexed = False)\n position_lower = ndb.ComputedProperty(\n 
lambda self: self.position.lower() if self.position else None\n )\n position_char1 = ndb.ComputedProperty(\n lambda self: self.position[0].lower() if self.position else None\n )\n\n # Salutation\n salutation = ndb.StringProperty(indexed = False)\n salutation_lower = ndb.ComputedProperty(\n lambda self: self.salutation.lower() if self.salutation else None\n )\n salutation_char1 = ndb.ComputedProperty(\n lambda self: self.salutation[0].lower() if self.salutation else None\n )\n\n # First name\n first_name = ndb.StringProperty(indexed = False)\n first_name_lower = ndb.ComputedProperty(\n lambda self: self.first_name.lower() if self.first_name else None\n )\n first_name_char1 = ndb.ComputedProperty(\n lambda self: self.first_name[0].lower() if self.first_name else None\n )\n\n # Last name\n last_name = ndb.StringProperty(indexed = False)\n last_name_lower = ndb.ComputedProperty(\n lambda self: self.last_name.lower() if self.last_name else None\n )\n last_name_char1 = ndb.ComputedProperty(\n lambda self: self.last_name[0].lower() if self.last_name else None\n )\n\n # Nickname\n nickname = ndb.StringProperty(indexed = False)\n nickname_lower = ndb.ComputedProperty(\n lambda self: self.nickname.lower() if self.nickname else None\n )\n nickname_char1 = ndb.ComputedProperty(\n lambda self: self.nickname[0].lower() if self.nickname else None\n )\n\n # Street\n street = ndb.StringProperty(indexed = False)\n street_lower = ndb.ComputedProperty(\n lambda self: self.street.lower() if self.street else None\n )\n street_char1 = ndb.ComputedProperty(\n lambda self: self.street[0].lower() if self.street else None\n )\n\n # Postcode\n postcode = ndb.StringProperty(indexed = False)\n postcode_lower = ndb.ComputedProperty(\n lambda self: self.postcode.lower() if self.postcode else None\n )\n postcode_char1 = ndb.ComputedProperty(\n lambda self: self.postcode[0].lower() if self.postcode else None\n )\n\n # City\n city = ndb.StringProperty(indexed = False)\n city_lower = ndb.ComputedProperty(\n lambda self: self.city.lower() if self.city else None\n )\n city_char1 = ndb.ComputedProperty(\n lambda self: self.city[0].lower() if self.city else None\n )\n\n # District\n district = ndb.StringProperty(indexed = False)\n district_lower = ndb.ComputedProperty(\n lambda self: self.district.lower() if self.district else None\n )\n district_char1 = ndb.ComputedProperty(\n lambda self: self.district[0].lower() if self.district else None\n )\n\n # Land (Bundesland)\n land = ndb.StringProperty(indexed = False)\n land_lower = ndb.ComputedProperty(\n lambda self: self.land.lower() if self.land else None\n )\n land_char1 = ndb.ComputedProperty(\n lambda self: self.land[0].lower() if self.land else None\n )\n\n # Country\n country = ndb.StringProperty(indexed = False) # Land\n country_lower = ndb.ComputedProperty(\n lambda self: self.country.lower() if self.country else None\n )\n country_char1 = ndb.ComputedProperty(\n lambda self: self.country[0].lower() if self.country else None\n )\n\n phone_items = ndb.StructuredProperty(Tel, repeated = True) # Telefonnummern\n email_items = ndb.StructuredProperty(Email, repeated = True) # E-Mail-Adressen\n url_items = ndb.StructuredProperty(Url, repeated = True) # URLs\n journal_items = ndb.StructuredProperty(JournalItem, repeated = True) # Journaleinträge\n business_items = ndb.StringProperty(repeated = True) # Branchen\n anniversary_items = ndb.StructuredProperty(Anniversary, repeated = True) # Jahrestage, Geburtstag\n gender = ndb.StringProperty()\n birthday = 
ndb.ComputedProperty(get_birthday_iso)\n age = property(fget = get_age)\n note_items = ndb.StructuredProperty(Note, repeated = True) # Notizen\n agreement_items = ndb.StructuredProperty(Agreement, repeated = True) # Vereinbarungen\n\n\n def to_dict(\n self,\n include = None,\n exclude = None,\n exclude_creation_metadata = None,\n exclude_edit_metadata = None,\n exclude_empty_fields = None\n ):\n \"\"\"\n Return address-dict without unneeded fields\n \"\"\"\n\n exclude = exclude or []\n if exclude_creation_metadata:\n exclude.extend([\"ct\", \"cu\"])\n if exclude_edit_metadata:\n exclude.extend([\"et\", \"eu\"])\n\n # Exclude _lower-Fields\n for property in Address._properties.values():\n if property._name.endswith(\"_lower\"):\n exclude.append(property._name)\n\n # Convert address to dictionary\n address_dict = self._to_dict(include = include, exclude = exclude)\n address_dict = copy.deepcopy(address_dict)\n address_dict[\"key_urlsafe\"] = self.key.urlsafe()\n\n # Repeated fields\n for fieldname in [\n \"phone_items\",\n \"email_items\",\n \"url_items\",\n \"note_items\",\n \"journal_items\",\n \"anniversary_items\",\n ]:\n if fieldname not in address_dict:\n continue\n\n for field_item in address_dict.get(fieldname, []):\n # Exclude creation metadata\n if exclude_creation_metadata:\n if \"ct\" in field_item:\n del field_item[\"ct\"]\n if \"cu\" in field_item:\n del field_item[\"cu\"]\n\n # Exclude edit metadata\n if exclude_edit_metadata:\n if \"et\" in field_item:\n del field_item[\"et\"]\n if \"eu\" in field_item:\n del field_item[\"eu\"]\n\n # Exclude empty fields\n if exclude_empty_fields:\n field = address_dict.get(fieldname, [])\n if not field:\n del address_dict[fieldname]\n\n # Exclude empty fields\n if exclude_empty_fields:\n for key, value in address_dict.items():\n if value is None:\n del address_dict[key]\n elif key in [\"category_items\", \"business_items\", \"tag_items\"]:\n if not value:\n del address_dict[key]\n\n # Finished\n return address_dict\n\n\n @ndb.ComputedProperty\n def deleted(self):\n \"\"\"\n Returns `True` if *DeletionTimestamp* is set.\n \"\"\"\n\n return bool(self.dt)\n\n\n def put(self, **ctx_options):\n \"\"\"\n Writes the address to the datastore.\n\n Adds a document to the Search-Index.\n \"\"\"\n\n # Save address\n key = ndb.Model.put(self, **ctx_options)\n\n # Gather information for the index\n fields = []\n if self.kind is not None:\n fields.append(search.TextField(name = u\"kind\", value = self.kind))\n if self.organization is not None:\n if common.format_.has_umlauts(self.organization):\n fields.append(search.TextField(\n name = u\"organization\", value = common.format_.replace_umlauts(self.organization)\n ))\n fields.append(search.TextField(name = u\"organization\", value = self.organization))\n if self.position is not None:\n if common.format_.has_umlauts(self.position):\n fields.append(search.TextField(\n name = u\"position\", value = common.format_.replace_umlauts(self.position)\n ))\n fields.append(search.TextField(name = u\"position\", value = self.position))\n if self.salutation is not None:\n if common.format_.has_umlauts(self.salutation):\n fields.append(search.TextField(\n name = u\"salutation\", value = common.format_.replace_umlauts(self.salutation)\n ))\n fields.append(search.TextField(name = u\"salutation\", value = self.salutation))\n if self.first_name is not None:\n if common.format_.has_umlauts(self.first_name):\n fields.append(search.TextField(\n name = u\"first_name\", value = common.format_.replace_umlauts(self.first_name)\n 
))\n fields.append(search.TextField(name = u\"first_name\", value = self.first_name))\n if self.last_name is not None:\n if common.format_.has_umlauts(self.last_name):\n fields.append(search.TextField(\n name = u\"last_name\", value = common.format_.replace_umlauts(self.last_name)\n ))\n fields.append(search.TextField(name = u\"last_name\", value = self.last_name))\n if self.nickname is not None:\n if common.format_.has_umlauts(self.nickname):\n fields.append(search.TextField(\n name = u\"nickname\", value = common.format_.replace_umlauts(self.nickname)\n ))\n fields.append(search.TextField(name = u\"nickname\", value = self.nickname))\n if self.street is not None:\n if common.format_.has_umlauts(self.street):\n fields.append(search.TextField(\n name = u\"street\", value = common.format_.replace_umlauts(self.street)\n ))\n fields.append(search.TextField(name = u\"street\", value = self.street))\n if self.postcode is not None:\n fields.append(search.TextField(name = u\"postcode\", value = self.postcode))\n if self.city is not None:\n if common.format_.has_umlauts(self.city):\n fields.append(search.TextField(\n name = u\"city\", value = common.format_.replace_umlauts(self.city)\n ))\n fields.append(search.TextField(name = u\"city\", value = self.city))\n if self.district is not None:\n if common.format_.has_umlauts(self.district):\n fields.append(search.TextField(\n name = u\"district\", value = common.format_.replace_umlauts(self.district)\n ))\n fields.append(search.TextField(name = u\"district\", value = self.district))\n if self.land is not None:\n if common.format_.has_umlauts(self.land):\n fields.append(search.TextField(\n name = u\"land\", value = common.format_.replace_umlauts(self.land)\n ))\n fields.append(search.TextField(name = u\"land\", value = self.land))\n if self.country is not None:\n if common.format_.has_umlauts(self.country):\n fields.append(search.TextField(\n name = u\"country\", value = common.format_.replace_umlauts(self.country)\n ))\n fields.append(search.TextField(name = u\"country\", value = self.country))\n if self.gender is not None:\n fields.append(search.TextField(name = u\"gender\", value = self.gender))\n\n for category_item in self.category_items:\n if common.format_.has_umlauts(category_item):\n fields.append(search.TextField(\n name = u\"category\", value = common.format_.replace_umlauts(category_item)\n ))\n fields.append(search.TextField(name = u\"category\", value = category_item))\n for tag_item in self.tag_items:\n if common.format_.has_umlauts(tag_item):\n fields.append(search.TextField(\n name = u\"tag\", value = common.format_.replace_umlauts(tag_item)\n ))\n fields.append(search.TextField(name = u\"tag\", value = tag_item))\n for business_item in self.business_items:\n if common.format_.has_umlauts(business_item):\n fields.append(search.TextField(\n name = u\"business\", value = common.format_.replace_umlauts(business_item)\n ))\n fields.append(search.TextField(name = u\"business\", value = business_item))\n for phone_item in self.phone_items:\n assert isinstance(phone_item, Tel)\n fields.append(search.TextField(name = u\"phone\", value = phone_item.number))\n for email_item in self.email_items:\n assert isinstance(email_item, Email)\n fields.append(search.TextField(name = u\"email\", value = email_item.email))\n for url_item in self.url_items:\n assert isinstance(url_item, Url)\n fields.append(search.TextField(name = u\"url\", value = url_item.url))\n for journal_item in self.journal_items:\n if common.format_.has_umlauts(journal_item.text):\n 
fields.append(search.TextField(\n name = u\"journal\", value = common.format_.replace_umlauts(journal_item.text)\n ))\n assert isinstance(journal_item, JournalItem)\n fields.append(search.TextField(name = u\"journal\", value = journal_item.text))\n for note_item in self.note_items:\n if common.format_.has_umlauts(note_item.text):\n fields.append(search.TextField(\n name = u\"note\", value = common.format_.replace_umlauts(note_item.text)\n ))\n assert isinstance(note_item, Note)\n fields.append(search.TextField(name = u\"note\", value = note_item.text))\n for agreement_item in self.agreement_items:\n if common.format_.has_umlauts(agreement_item.text):\n fields.append(search.TextField(\n name = u\"agreement\", value = common.format_.replace_umlauts(agreement_item.text)\n ))\n assert isinstance(agreement_item, Agreement)\n fields.append(search.TextField(name = u\"agreement\", value = agreement_item.text))\n for anniversary_item in self.anniversary_items:\n assert isinstance(anniversary_item, Anniversary)\n if anniversary_item.year and anniversary_item.month and anniversary_item.day:\n fields.append(\n search.DateField(\n name = u\"anniversary\", value = datetime.date(\n anniversary_item.year,\n anniversary_item.month,\n anniversary_item.day\n )\n )\n )\n elif anniversary_item.month and anniversary_item.day:\n fields.append(\n search.TextField(\n name = u\"anniversary\",\n value = unicode(anniversary_item.month) + u\"-\" + unicode(anniversary_item.day)\n )\n )\n fields.append(search.TextField(name = u\"anniversary\", value = anniversary_item.label))\n # Document\n document = search.Document(\n doc_id = key.urlsafe(),\n fields = fields\n )\n\n # Index (deferred)\n deferred.defer(\n _put_address_to_index,\n document = document\n )\n\n # Finished\n return key\n\n\ndef _put_address_to_index(document):\n \"\"\"\n Adds the address to the search_index\n \"\"\"\n\n index = search.Index(name = \"Address\")\n index.put(document)\n","sub_path":"application/common/model/address.py","file_name":"address.py","file_ext":"py","file_size_in_byte":23284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"218161547","text":"#Rutina para generar los términos de la sucesión de fibonacci\na=int(input(\"Intruduzca hasta que posición de la sucesión quiere generar los términos: \"))\nprint(\"\")\ndef f(n):\n if n < 2:\n return 1\n else:\n return f(n-1) + f(n-2)\nprint(\"La lista de los términos de la sucesión hasta la posición pedida es:\")\nprint(\"\")\nfor x in range(a):\n print(f(x))\n\n","sub_path":"Fibonacci.py","file_name":"Fibonacci.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"161273382","text":"#!/usr/bin/env python\n\nimport argparse\nimport json\n\nimport imp\nimport sys\n\nimport aws\nimport boto\nimport boto.cloudformation\nimport boto.cloudformation.stack\nimport boto.ec2.autoscale\nimport boto.ec2.elb\nimport boto.exception\nimport boto.iam\nimport boto.s3\nimport boto.sns\nimport oobe\nimport os\nimport utilities\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Create Environment.')\n oobe.setup_default_args(parser)\n parser.add_argument('--skip-health-check', help=\"Skip instance health checks.\", default=False, action='store_true')\n parser.add_argument('--use-python-template', help=\"Use python based cloud formation template.\", default=False, action='store_true')\n parser.add_argument('--override', help=\"Override key=value pairs 
to override cloudformation params.\", action=oobe.StoreNameValuePair)\n parser.add_argument('--cfn-dump', help=\"Dump Cloudformation template to file.\", default=False, action='store_true')\n args = oobe.parse_args(parser)\n\n oobe.setup_logging(args['app_log_level'], args['other_log_level'])\n logger = utilities.get_logger(__name__)\n\n working_dir = oobe.get_opscode_root()\n logger.debug(\"Dirs = [%s], [%s], \" % (working_dir, os.path.dirname(sys.argv[0])))\n\n temp_dir = None\n try:\n deployment_config, temp_dir = oobe.setup_deployment_vars(args, working_dir)\n cloudformation = boto.cloudformation.connect_to_region(args['region'])\n sns = boto.sns.connect_to_region(args['region'])\n ec2 = boto.ec2.connect_to_region(args['region'])\n s3 = boto.s3.connect_to_region(args['region'])\n auto_scale = boto.ec2.autoscale.connect_to_region(args['region'])\n elb = boto.ec2.elb.connect_to_region(args['region'])\n iam = boto.iam.connect_to_region(args['region'])\n\n overrides = {}\n if 'override' in args and args['override'] is not None:\n for key, value in args['override'].items():\n overrides[key] = value\n\n if 'ImageID' not in overrides or overrides['ImageID'] is None:\n images = aws.list_images(ec2)\n overrides['ImageID'] = images[0].id\n logger.info(\"Using AMI ID [%s]\", (overrides['ImageID']))\n\n iam_role = aws.list_iam_role(iam, deployment_config['deployment']['aws']['iam']['role'])\n if iam_role is None:\n aws.create_iam_role(iam, deployment_config['deployment']['aws']['iam']['role'])\n\n aws.prepare_cloudformation_stack_parameters(deployment_config, sns, overrides)\n\n root_cloudformation_template_path = deployment_config['deployment']['aws']['cloudformation']['template']['json']['path']\n\n static_stack_name = deployment_config['deployment']['aws']['cloudformation']['template']['json']['static']['name']\n cloudformation_static_template_name = deployment_config['deployment']['aws']['cloudformation']['template']['json']['static']['file']\n new_static_stack_id, static_stack_already_exists = aws.find_existing_stack(cloudformation, static_stack_name)\n cloudformation_static_template = load_cloudformation_template(cloudformation_static_template_name,\n root_cloudformation_template_path, False)\n\n new_static_stack_id = aws.launch_cloudformation_stack(cloudformation, s3, static_stack_name, deployment_config,\n cloudformation_static_template, static_stack_already_exists,\n new_static_stack_id)\n aws.wait_for_stack_status(cloudformation, new_static_stack_id)\n\n stack_name = deployment_config['deployment']['aws']['cloudformation']['template']['json']['resources']['name']\n cloudformation_template_name = deployment_config['deployment']['aws']['cloudformation']['template']['json']['resources']['file']\n new_stack_id, stack_already_exists = aws.find_existing_stack(cloudformation, stack_name)\n cloudformation_template = load_cloudformation_template(cloudformation_template_name, root_cloudformation_template_path, args['cfn_dump'])\n\n new_stack_id = aws.launch_cloudformation_stack(cloudformation, s3, stack_name, deployment_config,\n cloudformation_template, stack_already_exists, new_stack_id)\n aws.wait_for_stack_status(cloudformation, new_stack_id)\n\n if not args['skip_health_check']:\n aws.check_stack_health(cloudformation, ec2, auto_scale, deployment_config, new_stack_id)\n auto_scale_group_names = aws.get_stack_resources(cloudformation, new_stack_id, 'AWS::AutoScaling::AutoScalingGroup')\n aws.check_load_balancer_health(elb, ec2, auto_scale, auto_scale_group_names, deployment_config)\n\n 
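# Both CloudFormation stacks (static and resources) are now live and, unless --skip-health-check was passed, instance and load-balancer health have been verified.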
logger.info(\"Deployment successful.\")\n finally:\n oobe.cleanup(temp_dir)\n\n\ndef load_cloudformation_template(cloudformation_template_name, root_cloudformation_template_path, cfn_dump):\n logger = utilities.get_logger(__name__)\n cloudformation_template_file = os.path.join(root_cloudformation_template_path, cloudformation_template_name)\n logger.info(\"Using cloudformation json template from [%s]\" % cloudformation_template_file)\n logger.debug(\"Template Start ---------------------------------\")\n cloudformation_template_json = json.load(open(cloudformation_template_file))\n cloudformation_template = json.dumps(cloudformation_template_json, sort_keys=True, indent=4, separators=(',', ': '))\n logger.debug(cloudformation_template)\n logger.debug(\"Template End ---------------------------------\")\n if cfn_dump:\n raw_cfn_template_file = open(\"raw.json\", 'w')\n raw_cfn_template_file.write(cloudformation_template)\n raw_cfn_template_file.close()\n return cloudformation_template\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"oobe-opscode/operation_code/toolchain/rundeck/scripts/rd_CreateEnvironment.py","file_name":"rd_CreateEnvironment.py","file_ext":"py","file_size_in_byte":5886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"426865457","text":"from django.db import models\r\nfrom django.contrib.auth.models import User\r\n\r\n# Create your models here.\r\n\r\n\r\nclass Categoria(models.Model):\r\n TIPO_CHOICES = (\r\n ('R', 'Receita'),\r\n ('D', 'Despesa'),\r\n )\r\n nome = models.CharField(verbose_name='Nome', max_length=50)\r\n tipo = models.CharField(verbose_name='Tipo', max_length=1, choices=TIPO_CHOICES)\r\n descricao = models.TextField(verbose_name='Descrição', blank=True, null=True)\r\n usuario = models.ForeignKey(User, verbose_name='Usuário', on_delete=models.CASCADE)\r\n\r\n class Meta:\r\n verbose_name = 'Categoria'\r\n verbose_name_plural = 'Categorias'\r\n ordering = ['nome']\r\n\r\n def __str__(self):\r\n return self.nome\r\n","sub_path":"apps/geral/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"497587503","text":"# https://oj.leetcode.com/problems/populating-next-right-pointers-in-each-node/\n\n# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n# self.next = None\n\nclass Solution:\n # @param root, a tree node\n # @return nothing\n def connect(self, root):\n self.connectNode(root, None)\n\n def connectNode(self, root, sibling):\n if root == None:\n return\n\n root.next = sibling\n self.connectNode(root.left, root.right)\n if sibling != None:\n self.connectNode(root.right, sibling.left)\n else:\n self.connectNode(root.right, None)\n\nclass Solution1:\n # @param root, a tree node\n # @return nothing\n def connect(self, root):\n if root == None:\n return None\n\n # connect by layer\n start = root\n while start.left != None:\n cur = start\n while cur != None:\n cur.left.next = cur.right\n if cur.next != None:\n cur.right.next = cur.next.left\n cur = cur.next\n start = start.left\n","sub_path":"leetans/populateRightI.py","file_name":"populateRightI.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"650020002","text":"from flask import Flask, url_for, render_template, request, session\r\nimport 
sqlite3\r\n\r\napp = Flask(__name__)\r\nid = ''\r\nname = ''\r\nphoto = ''\r\nmarks = ''\r\n@app.route(\"/home\")\r\ndef home():\r\n return render_template('home.html')\r\n\r\n@app.route(\"/register\")\r\ndef register():\r\n return render_template('registration.html')\r\n\r\n@app.route(\"/success\", methods = ['POST'])\r\ndef my_form_post():\r\n conn = sqlite3.connect('database.db')\r\n c = conn.cursor()\r\n try:\r\n c.execute('''SELECT * FROM STUDENT''')\r\n #c.execute('')\r\n c.execute('''DELETE FROM STUDENT''')\r\n except:\r\n c.execute('''CREATE TABLE STUDENT(id text, name text, dob text, photo text, gender text, result text)''')\r\n conn.commit()\r\n c.execute('''SELECT COUNT(1) FROM STUDENT''')\r\n n = c.fetchone()[0]\r\n name = request.form['name']\r\n dob = request.form['dob']\r\n if(request.form['optradio1'] == 'on'):\r\n gender = \"MALE\"\r\n else:\r\n gender = \"FEMALE\"\r\n id = \"SAT\"\r\n n += 1\r\n if(n<10):\r\n id += \"00\"\r\n elif(n<100):\r\n id += \"0\"\r\n id += str(n)\r\n photo = request.form['photo']\r\n result = 0\r\n params = (id,name,dob,photo,gender,result)\r\n c.execute(\"INSERT INTO STUDENT VALUES(?,?,?,?,?,?)\",params)\r\n conn.commit()\r\n conn.close()\r\n return render_template('registration_success.html', name=name, dob=dob, gender=gender, photo=photo, id = id)\r\n\r\n@app.route(\"/test_enter\")\r\ndef test_enter():\r\n return render_template('test_entry.html')\r\n\r\n@app.route(\"/test_wait\", methods = ['POST'])\r\ndef test_wait():\r\n conn = sqlite3.connect('database.db')\r\n c = conn.cursor()\r\n global id\r\n global name\r\n global photo\r\n id = request.form['id']\r\n pwd = request.form['password']\r\n d = pwd[0:2]+'/'+pwd[2:4]+'/'+pwd[4:]\r\n params = (id,d,)\r\n c.execute('SELECT * FROM STUDENT WHERE id = ? AND dob = ?',params)\r\n row = c.fetchone()\r\n if row is None:\r\n return render_template('test_entry.html',error='User ID or Password Invalid!')\r\n name = row[1]\r\n photo = row[3]\r\n conn.close()\r\n return render_template('test_wait.html',id=id,name=name,photo=photo)\r\n\r\n@app.route(\"/test\")\r\ndef test():\r\n '''f = open(\"static/Q.txt\")\r\n ln=\"\"\r\n for line in f:\r\n ln+=line\r\n arr = ln.split(\"$$\")'''\r\n global id\r\n global name\r\n global photo\r\n return render_template('test.html',id=id,name=name,photo=photo)\r\n\r\n@app.route(\"/test_after\", methods=['POST'])\r\ndef test_after():\r\n #conn = sqlite3.connect('database.db')\r\n #c = conn.cursor()\r\n global marks\r\n id = request.form['id']\r\n marks = request.form['marks']\r\n #print(type(m))\r\n #params = (m,id,)\r\n #p2 = (id,)\r\n #c.execute(\"UPDATE STUDENT SET result = ? WHERE id = ?\",params)\r\n #c.execute('SELECT * FROM STUDENT WHERE id = ?',p2)\r\n #row = c.fetchone()\r\n #print(row)\r\n #row = c.fetchone()\r\n #conn.commit()\r\n #conn.close()\r\n return render_template('test_after.html')\r\n\r\n@app.route(\"/result\")\r\ndef result():\r\n return render_template('result.html')\r\n\r\n@app.route(\"/display_result\",methods=['POST'])\r\ndef display_result():\r\n conn = sqlite3.connect('database.db')\r\n c = conn.cursor()\r\n id = request.form['id']\r\n pwd = request.form['password']\r\n d = pwd[0:2]+'/'+pwd[2:4]+'/'+pwd[4:]\r\n params = (id,d,)\r\n try:\r\n c.execute('SELECT * FROM STUDENT WHERE id = ? 
AND dob = ?',params)\r\n except:\r\n return render_template('result.html',error='User ID or Password Invalid!')\r\n row = c.fetchone()\r\n name = row[1]\r\n dob = row[2]\r\n photo = row[3]\r\n gender = row[4]\r\n #result = row[5]\r\n #print(result)\r\n conn.close()\r\n return render_template('display_result.html',id=id,name=name,dob=dob,photo=photo,gender=gender,result=marks)\r\n\r\nif __name__ == '__main__':\r\n app.run(\r\n host=\"127.0.0.1\",\r\n port=int(\"5000\")\r\n )","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"463635452","text":"# SPDX-FileCopyrightText: 2022 Oxhead Alpha\n# SPDX-License-Identifier: LicenseRef-MIT-OA\n\n\"\"\"\nContains shared code from all Tezos wizards for a command line wizard skeleton.\n\nHelps with writing a tool that asks questions, validates answers, and executes\nthe appropriate steps using the final configuration.\n\"\"\"\n\nimport os, sys, subprocess, shlex, shutil\nimport re, textwrap\nimport argparse\nimport urllib.request\nimport json\n\n# Regexes\n\nsecret_key_regex = b\"(encrypted|unencrypted):(?:\\w{54}|\\w{88})\"\naddress_regex = b\"tz[123]\\w{33}\"\nprotocol_hash_regex = (\n b\"P[123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{50}\"\n)\nsigner_uri_regex = b\"((?:tcp|unix|https|http):\\/\\/.+)\\/(tz[123]\\w{33})\\/?\"\nledger_regex = b\"ledger:\\/\\/[\\w\\-]+\\/[\\w\\-]+\\/[\\w']+\\/[\\w']+\"\nderivation_path_regex = b\"(?:bip25519|ed25519|secp256k1|P-256)\\/[0-9]+h\\/[0-9]+h\"\n\n\n# Input validators\n\n\ndef enum_range_validator(options):\n def _validator(input):\n intrange = list(map(str, range(1, len(options) + 1)))\n if input not in intrange and input not in options:\n raise ValueError(\n \"Please choose one of the provided values or use their respective numbers.\"\n )\n try:\n opt = int(input) - 1\n except:\n return input\n else:\n opts = options\n if isinstance(options, dict):\n opts = list(options.keys())\n return opts[opt]\n\n return _validator\n\n\ndef dirpath_validator(input):\n if input and not os.path.isdir(input):\n raise ValueError(\"Please input a valid path to a directory.\")\n return input\n\n\ndef filepath_validator(input):\n if input and not os.path.isfile(input):\n raise ValueError(\"Please input a valid file path.\")\n return input\n\n\ndef reachable_url_validator(suffix=None):\n def _validator(input):\n full_url = mk_full_url(input, suffix)\n if url_is_reachable(full_url):\n return input\n else:\n raise ValueError(f\"{full_url} is unreachable. 
Please input a valid URL.\")\n\n return _validator\n\n\ndef required_field_validator(input):\n if not input.strip():\n raise ValueError(\"Please provide this required option.\")\n return input\n\n\n# The input has to be valid to at least one of the two passed validators.\ndef or_validator(validator1, validator2):\n def _validator(input):\n try:\n return validator1(input)\n except:\n return validator2(input)\n\n return _validator\n\n\n# Runs the input through the passed validator, allowing for possible alteration,\n# but doesn't raise an exception if it doesn't validate to allow for custom options, too.\ndef or_custom_validator(validator):\n def _validator(input):\n result = input\n try:\n result = validator(input)\n except:\n pass\n return result\n\n return _validator\n\n\n# To be validated, the input should adhere to the Tezos secret key format:\n# {encrypted, unencrypted}:\ndef secret_key_validator(input):\n match = re.match(secret_key_regex.decode(\"utf-8\"), input.strip())\n if not bool(match):\n raise ValueError(\n \"The input doesn't match the format for a Tezos secret key: \"\n \"{{encrypted, unencrypted}:}\"\n \"\\nPlease check the input and try again.\"\n )\n return input\n\n\n# To be validated, the input should adhere to the derivation path format:\n# [0-9]+h/[0-9]+h\ndef derivation_path_validator(input):\n derivation_path_regex_str = \"[0-9]+h\\/[0-9]+h\"\n match = re.match(derivation_path_regex_str, input.strip())\n if not bool(match):\n raise ValueError(\n \"The input doesn't match the format for a derivation path: \"\n + derivation_path_regex_str\n + \"\\nPlease check the input and try again.\"\n )\n return input\n\n\n# To be validated, the input should adhere to the signer URI format:\n# (tcp|unix|https|http):///tz[123]\\w{33}\ndef signer_uri_validator(input):\n match = re.match(signer_uri_regex.decode(\"utf-8\"), input.strip())\n if not bool(match):\n raise ValueError(\n \"The input doesn't match the format for a remote signer URI: \"\n + \"(tcp|unix|https|http):///\"\n + \"\\nPlease check the input and try again.\"\n )\n return input\n\n\n# To be validated, the input should adhere to the protocol hash format:\n# \ndef protocol_hash_validator(input):\n proto_hash_regex_str = protocol_hash_regex.decode(\"utf-8\")\n match = re.match(proto_hash_regex_str, input.strip())\n if not bool(match):\n raise ValueError(\n \"The input doesn't match the format for a protocol hash: \"\n + proto_hash_regex_str\n + \"\\nPlease check the input and try again.\"\n )\n return input\n\n\nclass Validator:\n def __init__(self, validator):\n self.validator = validator\n\n def validate(self, input):\n if self.validator is not None:\n if isinstance(self.validator, list):\n for v in self.validator:\n input = v(input)\n return input\n else:\n return self.validator(input)\n\n\n# Command line argument parsing\n\n\nparser = argparse.ArgumentParser()\n\n\n# Utilities\n\n\ndef proc_call(cmd):\n return subprocess.check_call(shlex.split(cmd))\n\n\ndef get_proc_output(cmd):\n if sys.version_info.major == 3 and sys.version_info.minor < 7:\n return subprocess.run(shlex.split(cmd), stdout=subprocess.PIPE)\n else:\n return subprocess.run(shlex.split(cmd), capture_output=True)\n\n\ndef show_systemd_service(service_name):\n return get_proc_output(f\"systemctl show {service_name}.service\").stdout\n\n\ndef find_systemd_env_files(show_systemd_output):\n return re.findall(b\"EnvironmentFiles?=(.*) \", show_systemd_output)\n\n\ndef find_systemd_unit_env(show_systemd_output):\n unit_env = 
re.search(b\"Environment=(.*)(?:$|\\n)\", show_systemd_output)\n if unit_env is not None:\n return unit_env.group(1).decode(\"utf-8\")\n return \"\"\n\n\n# Returns all the environment variables of a systemd service unit\n# Note: definitions directly in the unit (not in environment files) take precedence\ndef get_systemd_service_env(service_name):\n result = dict()\n sys_show = show_systemd_service(service_name)\n\n for env_file in find_systemd_env_files(sys_show):\n with open(env_file, \"r\") as f:\n for line in f:\n env_def = re.search(\"^(\\w+)=(.*)\\n\", line)\n if env_def is not None:\n env_var = env_def.group(1)\n var_val = env_def.group(2).strip('\"')\n result[env_var] = var_val\n\n env_matches = re.findall(\n r'(\\w+)=((\"(?:\\\\.|[^\"\\\\])*\")|([\\S]+))',\n find_systemd_unit_env(sys_show),\n )\n for env_match in env_matches:\n env_var = env_match[0]\n var_val = env_match[1].strip('\"')\n result[env_var] = var_val\n\n return result\n\n\ndef replace_systemd_service_env(service_name, field, value):\n for env_file in find_systemd_env_files(show_systemd_service(service_name)):\n with open(env_file, \"r\") as f:\n config_contents = f.read()\n\n old = re.search(f\"{field}=.*\", config_contents)\n if old is not None:\n new = f\"{field}={value}\"\n proc_call(\n f\"sudo sed -i 's|{old.group(0)}|{new}|' {env_file.decode('utf8')}\"\n )\n\n\ndef progressbar_hook(chunk_number, chunk_size, total_size):\n done = chunk_number * chunk_size\n percent = min(int(done * 100 / total_size), 100)\n print(\"Progress:\", percent, \"%,\", int(done / (1024 * 1024)), \"MB\", end=\"\\r\")\n\n\ndef color(input, colorcode):\n return colorcode + input + \"\\x1b[0m\"\n\n\ncolor_red = \"\\x1b[1;31m\"\n\n\ndef yes_or_no(prompt, default=None):\n valid = False\n while not valid:\n answer = input(prompt).strip().lower()\n if not answer and default is not None:\n answer = default\n if answer in [\"y\", \"yes\"]:\n print()\n return True\n elif answer in [\"n\", \"no\"]:\n print()\n return False\n else:\n print(color(\"Please provide a 'yes' or 'no' answer.\", color_red))\n\n\ndef mk_full_url(host_name, path):\n if path is None:\n return host_name.rstrip(\"/\")\n else:\n return \"/\".join([host_name.rstrip(\"/\"), path.lstrip(\"/\")])\n\n\ndef url_is_reachable(url):\n req = urllib.request.Request(url, headers=http_request_headers)\n try:\n urllib.request.urlopen(req)\n return True\n except (urllib.error.URLError, ValueError):\n return False\n\n\n# Global options\n\nkey_import_modes = {\n \"ledger\": \"From a ledger\",\n \"secret-key\": \"Either the unencrypted or password-encrypted secret key for your address\",\n \"remote\": \"Remote key governed by a signer running on a different machine\",\n \"generate-fresh-key\": \"Generate fresh key that should be filled manually later\",\n \"json\": \"Faucet JSON file\",\n}\n\nnetworks = {\n \"mainnet\": \"Main Tezos network\",\n \"ghostnet\": \"Long running test network, currently using the Nairobi Tezos protocol\",\n \"nairobinet\": \"Test network using the Nairobi Tezos protocol\",\n \"oxfordnet\": \"Test network using the Oxford Tezos protocol\",\n}\n\nhttp_request_headers = {\"User-Agent\": \"Mozilla/5.0\"}\n\n# Wizard CLI skeleton\n\n\nsuppress_warning_text = \"TEZOS_CLIENT_UNSAFE_DISABLE_DISCLAIMER=YES\"\n\n\ndef get_data_dir(network):\n node_env = get_systemd_service_env(f\"tezos-node-{network}\")\n data_dir = node_env.get(\"TEZOS_NODE_DIR\")\n if data_dir is None:\n print(\n \"TEZOS_NODE_DIR is undefined, defaulting to /var/lib/tezos/node-\" + network\n )\n return 
\"/var/lib/tezos/node-\" + network\n return data_dir\n\n\ndef get_key_address(tezos_client_options, key_alias):\n address = get_proc_output(\n f\"sudo -u tezos {suppress_warning_text} octez-client {tezos_client_options} \"\n f\"show address {key_alias} --show-secret\"\n )\n if address.returncode == 0:\n value_regex = (\n b\"(?:\"\n + ledger_regex\n + b\")|(?:\"\n + secret_key_regex\n + b\")|(?:remote\\:\"\n + address_regex\n + b\")\"\n )\n value = re.search(value_regex, address.stdout).group(0).decode()\n address = re.search(address_regex, address.stdout).group(0).decode()\n return (value, address)\n else:\n return None\n\n\ndef wait_for_ledger_app(ledger_app, client_dir):\n output = b\"\"\n try:\n while re.search(f\"Found a Tezos {ledger_app}\".encode(), output) is None:\n output = get_proc_output(\n f\"sudo -u tezos {suppress_warning_text} octez-client --base-dir {client_dir} list connected ledgers\"\n ).stdout\n proc_call(\"sleep 1\")\n except KeyboardInterrupt:\n return None\n ledgers_derivations = {}\n for ledger_derivation in re.findall(ledger_regex, output):\n ledger_url = (\n re.search(b\"ledger:\\/\\/[\\w\\-]+\\/\", ledger_derivation).group(0).decode()\n )\n derivation_path = (\n re.search(derivation_path_regex, ledger_derivation).group(0).decode()\n )\n ledgers_derivations.setdefault(ledger_url, []).append(derivation_path)\n return ledgers_derivations\n\n\ndef ledger_urls_info(ledgers_derivations, node_endpoint, client_dir):\n ledgers_info = {}\n max_derivation_len = 0\n for derivations_paths in ledgers_derivations.values():\n max_derivation_len = max(max_derivation_len, max(map(len, derivations_paths)))\n for ledger_url, derivations_paths in ledgers_derivations.items():\n for derivation_path in derivations_paths:\n output = get_proc_output(\n f\"sudo -u tezos {suppress_warning_text} octez-client --base-dir {client_dir} \"\n f\"show ledger {ledger_url + derivation_path}\"\n ).stdout\n addr = re.search(address_regex, output).group(0).decode()\n balance = (\n get_proc_output(\n f\"sudo -u tezos {suppress_warning_text} octez-client --base-dir {client_dir} \"\n f\"--endpoint {node_endpoint} get balance for {addr}\"\n )\n .stdout.decode()\n .strip()\n )\n ledgers_info.setdefault(ledger_url, []).append(\n (\n \"{:\" + str(max_derivation_len + 1) + \"} address: {}, balance: {}\"\n ).format(derivation_path + \",\", addr, balance)\n )\n return ledgers_info\n\n\ndef search_json_with_default(json_filepath, field, default):\n with open(json_filepath, \"r\") as f:\n try:\n json_dict = json.load(f)\n except:\n return default\n return json_dict.pop(field, default)\n\n\nclass Step:\n def __init__(\n self,\n id: str,\n prompt: str,\n help: str,\n default: str = \"1\",\n options=None,\n validator=None,\n ):\n self.id = id\n self.prompt = prompt\n self.help = help\n self.default = default\n self.options = options\n self.validator = validator\n\n def pprint_options(self):\n i = 1\n def_i = None\n try:\n def_i = int(self.default)\n except:\n pass\n\n if self.options and isinstance(self.options, list):\n options_count = 0\n for o in self.options:\n if isinstance(o, dict):\n for values in o.values():\n if not isinstance(values, list):\n options_count += 1\n else:\n options_count += len(values)\n else:\n options_count += 1\n index_len = len(str(options_count))\n str_format = f\"{{:{index_len}}}. 
{{}}\"\n for o in self.options:\n if isinstance(o, dict):\n for k, values in o.items():\n print()\n print(f\"'{k}':\")\n print()\n if not isinstance(values, list):\n values = [values]\n for v in values:\n if def_i is not None and i == def_i:\n print(str_format.format(i, \"(default) \" + v))\n else:\n print(str_format.format(i, v))\n i += 1\n print()\n else:\n if def_i is not None and i == def_i:\n print(str_format.format(i, \"(default) \" + o))\n else:\n print(str_format.format(i, o))\n i += 1\n elif self.options and isinstance(self.options, dict):\n index_len = len(str(len(self.options)))\n max_option_len = max(map(len, self.options.keys()))\n padding = max(26, max_option_len + 2)\n indent_size = index_len + 4 + padding\n str_format = f\"{{:{index_len}}}. {{:<{padding}}} {{}}\"\n for o in self.options:\n description = textwrap.indent(\n textwrap.fill(self.options[o], 60),\n \" \" * indent_size,\n ).lstrip()\n if def_i is not None and i == def_i:\n print(str_format.format(i, o + \" (default)\", description))\n else:\n print(str_format.format(i, o, description))\n i += 1\n elif not self.options and self.default is not None:\n print(\"Default:\", self.default)\n\n\n# Steps\n\nsecret_key_query = Step(\n id=\"secret_key\",\n prompt=\"Provide either the unencrypted or password-encrypted secret key for your address.\",\n help=\"The format is 'unencrypted:edsk...' for the unencrypted key, or 'encrypted:edesk...'\"\n \"for the encrypted key.\",\n default=None,\n validator=Validator([required_field_validator, secret_key_validator]),\n)\n\nremote_signer_uri_query = Step(\n id=\"remote_signer_uri\",\n prompt=\"Provide your remote key with the address of the signer.\",\n help=\"The format is the address of your remote signer host, followed by a public key,\\n\"\n \"i.e. 
something like http://127.0.0.1:6732/tz1V8fDHpHzN8RrZqiYCHaJM9EocsYZch5Cy\\n\"\n \"The supported schemes are https, http, tcp, and unix.\",\n default=None,\n validator=Validator([required_field_validator, signer_uri_validator]),\n)\n\nderivation_path_query = Step(\n id=\"derivation_path\",\n prompt=\"Provide derivation path for the key stored on the ledger.\",\n help=\"The format is '[0-9]+h/[0-9]+h'\",\n default=None,\n validator=Validator([required_field_validator, derivation_path_validator]),\n)\n\n\njson_filepath_query = Step(\n id=\"json_filepath\",\n prompt=\"Provide the path to your downloaded faucet JSON file.\",\n help=\"The file should contain the 'mnemonic' and 'secret' fields.\",\n default=None,\n validator=Validator([required_field_validator, filepath_validator]),\n)\n\n\ndef get_ledger_url_query(ledgers):\n return Step(\n id=\"ledger_url\",\n prompt=\"Choose a ledger to get the new derivation from.\",\n options=ledgers,\n default=None,\n validator=Validator([required_field_validator, enum_range_validator(ledgers)]),\n help=\"In order to specify new derivation path, you need to specify a ledger to get the derivation from.\",\n )\n\n\n# We define this step as a function since the corresponding step requires\n# tezos-node to be running and bootstrapped in order to gather the data\n# about the ledger-stored addresses, so it's called right before invoking\n# after the node was boostrapped\ndef get_ledger_derivation_query(ledgers_derivations, node_endpoint, client_dir):\n extra_options = [\"Specify derivation path\", \"Go back\"]\n full_ledger_urls = []\n for ledger_url, derivations_paths in ledgers_derivations.items():\n for derivation_path in derivations_paths:\n full_ledger_urls.append(ledger_url + derivation_path)\n return Step(\n id=\"ledger_derivation\",\n prompt=\"Select a key to import from the ledger.\\n\"\n \"You can choose one of the suggested derivations or provide your own:\",\n help=\"'Specify derivation path' will ask a derivation path from you.\"\n \"'Go back' will return you back to the key type choice.\",\n default=None,\n options=[ledger_urls_info(ledgers_derivations, node_endpoint, client_dir)]\n + extra_options,\n validator=Validator(\n [\n required_field_validator,\n enum_range_validator(full_ledger_urls + extra_options),\n ]\n ),\n )\n\n\nclass Setup:\n def __init__(self, config={}):\n self.config = config\n\n def query_step(self, step: Step):\n validated = False\n while not validated:\n print(step.prompt)\n step.pprint_options()\n answer = input(\"> \").strip()\n\n if answer.lower() in [\"quit\", \"exit\"]:\n raise KeyboardInterrupt\n elif answer.lower() in [\"help\", \"?\"]:\n print(step.help)\n print()\n else:\n if not answer and step.default is not None:\n answer = step.default\n\n try:\n if step.validator is not None:\n answer = step.validator.validate(answer)\n except ValueError as e:\n print(color(\"Validation error: \" + str(e), color_red))\n else:\n validated = True\n self.config[step.id] = answer\n\n def systemctl_simple_action(self, action, service):\n proc_call(\n f\"sudo systemctl {action} tezos-{service}-{self.config['network']}.service\"\n )\n\n def systemctl_enable(self):\n if self.config[\"systemd_mode\"] == \"yes\":\n print(\n \"Enabling the tezos-{}-{}.service\".format(\n self.config[\"mode\"], self.config[\"network\"]\n )\n )\n self.systemctl_simple_action(\"enable\", self.config[\"mode\"])\n else:\n print(\"The services won't restart on boot.\")\n\n def get_tezos_client_options(self):\n options = (\n f\"--base-dir 
{self.config['client_data_dir']} \"\n f\"--endpoint {self.config['node_rpc_endpoint']}\"\n )\n if \"remote_host\" in self.config:\n options += f\" -R '{self.config['remote_host']}'\"\n return options\n\n def query_and_update_config(self, query):\n self.query_step(query)\n self.config[\"tezos_client_options\"] = self.get_tezos_client_options()\n proc_call(\n f\"sudo -u tezos {suppress_warning_text} octez-client \"\n f\"{self.config['tezos_client_options']} config update\"\n )\n\n def fill_baking_config(self):\n net = self.config[\"network\"]\n baking_env = get_systemd_service_env(f\"tezos-baking-{net}\")\n\n self.config[\"client_data_dir\"] = baking_env.get(\n \"TEZOS_CLIENT_DIR\",\n \"/var/lib/tezos/.tezos-client\",\n )\n\n node_rpc_addr = baking_env.get(\n \"NODE_RPC_ADDR\",\n \"localhost:8732\",\n )\n self.config[\"node_rpc_addr\"] = node_rpc_addr\n self.config[\"node_rpc_endpoint\"] = \"http://\" + node_rpc_addr\n\n self.config[\"baker_alias\"] = baking_env.get(\"BAKER_ADDRESS_ALIAS\", \"baker\")\n\n def fill_remote_signer_infos(self):\n self.query_step(remote_signer_uri_query)\n\n rsu = re.search(signer_uri_regex.decode(), self.config[\"remote_signer_uri\"])\n\n self.config[\"remote_host\"] = rsu.group(1)\n self.config[\"remote_key\"] = rsu.group(2)\n\n def get_current_head_level(self):\n response = urllib.request.urlopen(\n self.config[\"node_rpc_endpoint\"] + \"/chains/main/blocks/head/header\"\n )\n return str(json.load(response)[\"level\"])\n\n # Check if an account with the baker_alias alias already exists, and ask the user\n # if it can be overwritten.\n def check_baker_account(self):\n baker_alias = self.config[\"baker_alias\"]\n baker_key_value = get_key_address(self.get_tezos_client_options(), baker_alias)\n if baker_key_value is not None:\n value, address = baker_key_value\n print()\n print(\"An account with the '\" + baker_alias + \"' alias already exists.\")\n print(\"Its current address is\", address)\n\n return yes_or_no(\n \"Would you like to import a new key and replace this one? 
\", \"no\"\n )\n else:\n return True\n\n def import_key(self, key_mode_query, ledger_app=None):\n\n baker_alias = self.config[\"baker_alias\"]\n tezos_client_options = self.get_tezos_client_options()\n\n valid_choice = False\n while not valid_choice:\n\n try:\n self.query_step(key_mode_query)\n\n if self.config[\"key_import_mode\"] == \"secret-key\":\n self.query_step(secret_key_query)\n proc_call(\n f\"sudo -u tezos {suppress_warning_text} octez-client {tezos_client_options} \"\n f\"import secret key {baker_alias} {self.config['secret_key']} --force\"\n )\n elif self.config[\"key_import_mode\"] == \"remote\":\n self.fill_remote_signer_infos()\n\n tezos_client_options = self.get_tezos_client_options()\n proc_call(\n f\"sudo -u tezos {suppress_warning_text} octez-client {tezos_client_options} \"\n f\"import secret key {baker_alias} remote:{self.config['remote_key']} --force\"\n )\n elif self.config[\"key_import_mode\"] == \"generate-fresh-key\":\n proc_call(\n f\"sudo -u tezos {suppress_warning_text} octez-client {tezos_client_options} \"\n f\"gen keys {baker_alias} --force\"\n )\n print(\"Newly generated baker key:\")\n proc_call(\n f\"sudo -u tezos {suppress_warning_text} octez-client {tezos_client_options} \"\n f\"show address {baker_alias}\"\n )\n network = self.config[\"network\"]\n print(\n f\"Before proceeding with baker registration you'll need to provide this address with some XTZ.\\n\"\n f\"Note that you need at least 6000 XTZ in order to receive baking and endorsing rights.\\n\"\n f\"You can fill your address using the faucet: https://faucet.{network}.teztnets.xyz/.\\n\"\n f\"Waiting for funds to arrive... (Ctrl + C to choose another option).\"\n )\n try:\n while True:\n result = get_proc_output(\n f\"sudo -u tezos {suppress_warning_text} octez-client {tezos_client_options} \"\n f\"register key {baker_alias} as delegate\"\n )\n if result.returncode == 0:\n print(result.stdout.decode(\"utf8\"))\n break\n else:\n proc_call(\"sleep 1\")\n except KeyboardInterrupt:\n print(\"Going back to the import mode selection.\")\n continue\n elif self.config[\"key_import_mode\"] == \"json\":\n self.query_step(json_filepath_query)\n json_tmp_path = shutil.copy(self.config[\"json_filepath\"], \"/tmp/\")\n proc_call(\n f\"sudo -u tezos {suppress_warning_text} octez-client {tezos_client_options} \"\n f\"activate account {baker_alias} with {json_tmp_path} --force\"\n )\n try:\n os.remove(json_tmp_path)\n except:\n pass\n else:\n print(f\"Please open the Tezos {ledger_app} app on your ledger or\")\n print(\"press Ctrl+C to go back to the key import mode selection.\")\n ledgers_derivations = wait_for_ledger_app(\n ledger_app, self.config[\"client_data_dir\"]\n )\n if ledgers_derivations is None:\n print(\"Going back to the import mode selection.\")\n continue\n ledgers = list(ledgers_derivations.keys())\n baker_ledger_url = \"\"\n while re.match(ledger_regex.decode(), baker_ledger_url) is None:\n self.query_step(\n get_ledger_derivation_query(\n ledgers_derivations,\n self.config[\"node_rpc_endpoint\"],\n self.config[\"client_data_dir\"],\n )\n )\n if self.config[\"ledger_derivation\"] == \"Go back\":\n self.import_key(key_mode_query, ledger_app)\n return\n elif (\n self.config[\"ledger_derivation\"]\n == \"Specify derivation path\"\n ):\n if len(ledgers) >= 1:\n # If there is only one connected ledger, there is nothing to choose from\n if len(ledgers) == 1:\n ledger_url = ledgers[0]\n else:\n self.query_step(get_ledger_url_query(ledgers))\n ledger_url = self.config[\"ledger_url\"]\n 
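# A ledger is now selected; ask for a custom derivation path and offer it under every supported signing curve.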
self.query_step(derivation_path_query)\n signing_curves = [\n \"bip25519\",\n \"ed25519\",\n \"secp256k1\",\n \"P-256\",\n ]\n for signing_curve in signing_curves:\n ledgers_derivations.setdefault(\n ledger_url, []\n ).append(\n signing_curve\n + \"/\"\n + self.config[\"derivation_path\"]\n )\n else:\n baker_ledger_url = self.config[\"ledger_derivation\"]\n proc_call(\n f\"sudo -u tezos {suppress_warning_text} octez-client {tezos_client_options} \"\n f\"import secret key {baker_alias} {baker_ledger_url} --force\"\n )\n\n except EOFError:\n raise EOFError\n except Exception as e:\n print(\"Something went wrong when calling octez-client:\")\n print(str(e))\n print()\n print(\"Please check your input and try again.\")\n else:\n valid_choice = True\n value, _ = get_key_address(\n tezos_client_options, self.config[\"baker_alias\"]\n )\n self.config[\"baker_key_value\"] = value\n","sub_path":"baking/src/tezos_baking/wizard_structure.py","file_name":"wizard_structure.py","file_ext":"py","file_size_in_byte":29933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"7966350","text":"import logging\n\nfrom app.models.game import Question\nfrom app.models.game import GameCategory\n\nfrom app.views.page_controller import PageController\n\nclass QuestionList( PageController ):\n def get(self):\n return\n\n def do_questionListById(self,params):\n cid = int(params['id'])\n qry = Question.query(Question.category_id == cid).fetch()\n allQuestions = []\n cat = GameCategory.query(GameCategory.id == cid).get()\n\n if cat == None:\n ctitle='none'\n else:\n ctitle = cat.title\n\n for q in qry:\n question = {\n 'id': q.id,\n 'c_id': q.category_id,\n 'question':q.question,\n 'answer':q.answer,\n 'value':q.value\n }\n allQuestions.append(question)\n\n tvalues = {\n 'questions':allQuestions,\n 'ctitle':ctitle\n }\n\n self.send_template( '../templates/questionList.html', tvalues )\n return\n\n def do_removeQuestionById(self,params):\n id = int(params['id'])\n qry = Question.query(Question.id == id).fetch()\n for q in qry:\n q.key.delete()\n return\n\n def do_findHighestQuestionId(self,params):\n entry = Question.query().order(-Question.id).get();\n if entry==None:\n id = 0\n else:\n id = entry.id\n\n self.send_json({\"id\":id})\n return\n","sub_path":"app/views/questionList.py","file_name":"questionList.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"210944430","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 11 14:20:02 2020\nhttps://github.com/MJeremy2017/Reinforcement-Learning-Implementation/blob/master/DynaMaze/DynaMaze.py\n@author: vcardenas.local.\n\"\"\"\n\nimport gym\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport datetime\nimport time\n\n# ROWS = 6\n# COLS = 9\n# S = (2, 0)\n# G = (0, 8)\n# BLOCKS = [(1, 2), (2, 2), (3, 2), (0, 7), (1, 7), (2, 7), (4, 5)]\n# ACTIONS = [\"left\", \"up\", \"right\", \"down\"]\n# ------------------------------------------------------------------------------\n# 5. 
Models\n# ------------------------------------------------------------------------------\n\n# Implementation of a Table Lookup Model as showed by David Silver in\n# COMPM050/COMPGI13 Lecture 8, slide 15\nclass TableLookupModel:\n def __init__(self, nS, nA):\n self.nS = nS\n self.nA = nA\n #Q = np.zeros(buckets + (n_actions,))\n self.N = np.zeros((nS, nA)) # Keep track of the number of times (s,a)\n # has appeared\n self.SprimeCounter = np.zeros((nS, nA, nS)) # Number of times (s,a)\n # resulted in s'\n self.Rcounter = np.zeros((nS, nA)) # Total reward obtained by (s,a)\n self.observedStates = [] # states that have appeared before\n self.observedActions = [[] for i in range(nS)] # actions observed before\n # at every state\n self.terminalStates = [] # No knowledge about terminal states assumed\n\n # Experience is considered as a tuple of (state, action, reward, state_prime)\n def addExperience(self, experience):\n s, a, r, s_prime = experience\n self.N[s][a] += 1\n self.SprimeCounter[s][a][s_prime] += 1\n self.Rcounter[s][a] += r\n if not s in self.observedStates: self.observedStates.append(s)\n if not a in self.observedActions[s]: self.observedActions[s].append(a)\n\n # Samples the resulting state of (s,a)\n def sampleStatePrime(self, state, action):\n # If there is no information about (s,a), then sample randomly\n if self.N[state][action] == 0: return np.random.choice(range(self.nS))\n\n prob = self.SprimeCounter[state][action] / self.N[state][action]\n return np.random.choice(range(self.nS), p = prob)\n\n # Samples the resulting reward of (s,a)\n def sampleReward(self, state, action):\n # If there is no information about (s,a), then return a fixed reward\n if self.N[state][action] == 0: return 0\n\n return self.Rcounter[state][action] / self.N[state][action]\n\n # Sample a random state that has been observed before\n def sampleRandState(self):\n return np.random.choice(self.observedStates)\n\n # Sample a random action previously observed in a given state\n def sampleRandAction(self, state):\n return np.random.choice(self.observedActions[state])\n\n # Give model knowledge about terminal states\n def addTerminalStates(self, term_states):\n self.terminalStates = term_states\n\n # Check wether a state is terminal (assuming model has knowledge about\n # terminal states)\n def isTerminal(self, state):\n return state in self.terminalStates\n#######################\n\n\ndef DynaAgentPlay(buckets, # define o numero de buckets para cada estado valor (x, x', theta, theta')\n n_episodes, # numero de episodios\n n_steps, # numero de maximo de steps por episodio\n min_alpha, # taxa de aprendizado\n min_epsilon, # taxa de explotacao\n gamma, # fator de desconto\n ada_divisor): # taxa de decaimento para os parametro de alpha e epsilon\n buckets = (1,1,6,12) \n env = gym.make('CartPole-v0')\n n_actions = env.action_space.n # nro de acoes\n n_states = env.observation_space.shape[0]# nro de estados\n state_actions = [] # armazena estado acao\n \n # inicialize Q(s,a) e Modelo(s,a) para todo estado s e acao a\n Q = np.zeros(buckets + (n_actions,))\n model = {} # Initialize model\n N = {} #Keep track of the number of times (s,a) has appeared\n SprimeCounter = {} # Number of times (s,a) resulted in s'\n Rcounter = {} #Total reward obtained by (s,a)\n\n # define upper and lower bounds for each state value\n upper_bounds = [\n env.observation_space.high[0],\n 0.5,\n env.observation_space.high[2],\n math.radians(50)\n ]\n lower_bounds = [\n env.observation_space.low[0],\n -0.5,\n env.observation_space.low[2],\n 
-math.radians(50)]\n\n \n def discretize(obs):\n ''' discretise the continuous state into buckets '''\n ratios = [(obs[i] + abs(lower_bounds[i])) / (upper_bounds[i] - lower_bounds[i]) for i in range(len(obs))]\n new_obs = [int(round((buckets[i] - 1) * ratios[i])) for i in range(len(obs))]\n new_obs = [min(buckets[i] - 1, max(0, new_obs[i])) for i in range(len(obs))]\n return tuple(new_obs)\n \n \n def epsilon_policy(state, epsilon):\n ''' choose an action using the epsilon policy '''\n exploration_exploitation_tradeoff = np.random.random()\n if exploration_exploitation_tradeoff <= epsilon:\n action = env.action_space.sample() # exploration\n else:\n action = np.argmax(Q[state]) # exploitation\n return action\n \n def get_epsilon(t):\n ''' decrease the exploration rate at each episode '''\n if ada_divisor == 0:\n return min_epsilon # epsilon fixo\n else:\n return max(min_epsilon, min(1, 1.0 - math.log10((t + 1) / ada_divisor))) # adaptativo\n\n def get_alpha(t):\n ''' decrease the learning rate at each episode '''\n if ada_divisor == 0:\n return min_alpha # alpha fixo\n else:\n return max(min_alpha, min(1.0, 1.0 - math.log10((t + 1) / ada_divisor))) # adaptativo\n \n def update_q(current_state, action, reward, new_state, alpha):\n ''' update the Q matrix with the Bellman equation '''\n Q[current_state][action] += alpha * (reward + gamma * np.max(Q[new_state]) - Q[current_state][action])\n \n # Experience is considered as a tuple of (state, action, reward, state_prime)\n def addExperience(current_state, action, reward, new_state):\n if current_state not in model.keys():\n model[current_state] = {}\n N[current_state] = {action:0}\n SprimeCounter[current_state] = {action:{new_state:0}}\n Rcounter[current_state] = {action:0}\n if action not in N[current_state].keys():\n N[current_state] = {action:0}\n SprimeCounter[current_state] = {action:{new_state:0}}\n Rcounter[current_state] = {action:0}\n if new_state not in SprimeCounter[current_state][action].keys():\n SprimeCounter[current_state][action] = {new_state:0}\n \n model[current_state][action] = (reward, new_state) # self.model[self.state][action] = (reward, new_State) ex (0, 0, 2, 6): {1: (1.0, (0, 0, 2, 4)), 0: (1.0, (0, 0, 2, 8))}\n N[current_state][action] += 1\n #print(N)\n SprimeCounter[current_state][action][new_state] += 1\n Rcounter[current_state][action] += reward\n #if not s in self.observedStates: self.observedStates.append(s)\n #if not a in self.observedActions[s]: self.observedActions[s].append(a)\n\n\n # Samples the resulting reward of (s,a)\n def sampleReward(state, action):\n # If there is no information about (s,a), then return a fixed reward\n if action not in N[state].keys() or N[state][action] == 0: \n return 0\n return Rcounter[state][action] / N[state][action]\n \n # Samples the resulting state of (s,a)\n def sampleStatePrime(state, action):\n # If there is no information about (s,a), then sample randomly \n reward, new_State = model[state][action]\n return new_State\n # prob = SprimeCounter[state][action][new_State] / N[state][action]\n # return np.random.choice([new_State,], p = [prob,1-prob])\n # #\n \n \n\n \n # #randomState = #[np.random.uniform(low=-2.4, high=2.4),np.random.uniform(low=-1000000, high=1000000),np.random.uniform(low=-2.4, high=2.4), np.random.uniform(low=-1000000, high=1000000)]\n # #randomState = discretize(randomState) \n # if N[state][action] == 0: \n # return new_State #np.random.choice(range(nS)) \n \n # prob = SprimeCounter[state][action][new_State] / N[state][action]\n \n # if prob > 0.5:\n # 
return new_State\n # else:\n # return model[state][action]\n\n \n \n \n #n_steps_per_episode = [] \n df_results = pd.DataFrame(columns=['episode', 'alpha', 'epsilon', 'n_steps', 'episode_rewards'])\n \n \n for episode in range(n_episodes): \n #print('dyna ', episode)\n current_state = env.reset()\n current_state = discretize(current_state) \n alpha = get_alpha(episode)\n epsilon = get_epsilon(episode)\n \n episode_rewards = 0\n state_actions = [] \n # repetir em quanto o pole ainda esteja em pe\n for t in range(n_steps): # self.maze.end:\n # 1. escolha uma estado e uma acao real\n action = epsilon_policy(current_state, epsilon) \n state_actions.append((current_state, action))\n \n # 2. observe uma recompensa resultante a experiencia real\n new_state, reward, done, _ = env.step(action) #nxtState = self.maze.nxtPosition(action)\n #reward = self.maze.giveReward()\n # discreticacao do novo estado\n new_state = discretize(new_state)\n \n # 3. atualice o valor de Q-value com Q-learning \n update_q(current_state, action, reward, new_state, alpha)\n #self.Q_values[self.state][action] += self.alpha*(reward + np.max(list(self.Q_values[nxtState].values())) - self.Q_values[self.state][action])\n\n # increment the cumulative reward\n episode_rewards += reward\n \n # 4. atualice o modelo do ambiente com esta experiencia real\n # if current_state not in model.keys():\n # model[current_state] = {}\n # model[current_state][action] = (reward, new_state)\n # current_state = new_state #self.state = nxtState\n addExperience(current_state, action, reward, new_state)\n \n current_state = new_state #self.state = nxtState\n ###### DYNA com simulacao\n # 5. repita n vezes para atualizar o Q-valor aleatoriamente\n #if (t > 2000):\n for temp in range(5):# n_steps):\n # escolha um estado hipotetic entre os estadosobservados\n rand_idx = np.random.choice(range(len(model.keys())))\n _state = list(model)[rand_idx] # lista das keys do modelo key=(s)\n \n # escolha uma acao hipotetica entre as acoes observadas\n rand_idx = np.random.choice(range(len(model[_state].keys()))) # retorna as acoes registradas em model para esse state\n _action = list(model[_state])[rand_idx]\n \n # simule recompensa e seguinte estado resultante com o modelo do ambiente!!!\n #_reward, _new_state = model[_state][_action]\n _reward = sampleReward(_state, _action)\n _new_state = sampleStatePrime(_state, _action)\n \n # aplique aprendizado por reforço a esta experiencia hipotética\n update_q(_state, _action, _reward, _new_state, alpha)\n \n #Q_values[_state][_action] += self.alpha*(_reward + np.max(list(self.Q_values[_nxtState].values())) - self.Q_values[_state][_action]) \n \n # end of game\n if done:\n #print('Episode:{}/{} Total steps: {} Total reward: {}'.format(episode, n_episodes, t, episode_rewards))\n break\n #print('Episode:{}/{} Total steps: {} Total reward: {}'.format(episode, n_episodes, t, episode_rewards))\n # append the episode cumulative reward to the reward list\n df_results.loc[len(df_results)] = [episode, alpha, epsilon, t, episode_rewards]\n \n #n_steps_per_episode.append(len(state_actions)) \n #self.reset()\n return df_results\n\n\n######################################\n# EXPERIMENTOS\n######################################\ndef meanResults(df, n):\n df_len = len(df)\n count = 0\n dfs = []\n e = 1\n while True:\n if count > df_len - 1:\n break\n start = count\n count += n\n # print(\"%s : %s\" % (start, count))\n x = df.iloc[start: count]\n dfs.append([e, x['episode_rewards'].mean()])\n e += 1\n df = pd.DataFrame(dfs)\n 
df.columns = ['episode', 'episode_rewards']\n # print(df)\n return df\n\ndef combAlphaEp(alphas, epsilons):\n alpha_epsilon = []\n for alpha in alphas:\n for epsilon in epsilons:\n alpha_epsilon.append((alpha,epsilon))\n return alpha_epsilon\n\ndef exp_dyna_q(nr, buckets, n_episodes, batch, n_steps, alpha_epsilon, ada_divisor, gamma):\n print('INICIO dyna ', datetime.datetime.now().time())\n start_time = time.time()\n # config grafico\n plt.style.use('seaborn')\n palette = plt.get_cmap('tab20')\n plt.figure(figsize=(8, 7), dpi=100)\n plt.xlabel('Episodios (x' + str(batch) + ')')\n plt.ylabel('Recompensas por episodio')\n df_results = pd.DataFrame()\n num = 0\n for (alpha, epsilon) in alpha_epsilon:\n num += 1\n name_exp = 'dynaq' + str(nr) + '_' + 'epis' + str(n_episodes) + '_batch' + str(batch) + '_alpha' + str(\n alpha) + '_epsi' + str(epsilon) + '_T' + str(int(time.time()))\n print(name_exp)\n df_results = DynaAgentPlay(buckets, n_episodes, n_steps, alpha, epsilon, gamma, ada_divisor)\n df_results = meanResults(df_results, batch)\n df_results.to_csv(str(num)+'dynaaaa df_results'+name_exp+'.csv', index=False)\n #plt.savefig('results/graph_'+name_exp+'.png', dpi=100)\n plt.title('Dyna-Q: Curva de evolução de aprendizado', loc='center', fontsize=12,\n fontweight=0) # alpha: ' + str(alpha) + ' e epsilon: ' + epsilon\n x = df_results['episode']\n y = df_results['episode_rewards']\n #plt.plot(x, y, linewidth=2.5, dashes=[int(alpha * 20 + 3), 2], color=palette(num),\n # label='a: ' + str(alpha) + ', e: ' + str(epsilon))\n plt.plot(x, y, linewidth=2.5, color=palette(num),\n label='a: ' + str(alpha) + ', ep: ' + str(epsilon))\n ax = plt.subplot(111)\n ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=4)\n #plt.legend(loc='lower right', mode=\"expand\", borderaxespad=0.)\n plt.axvline(10, color='r', ls=\"dotted\")\n plt.axhline(175, color='r', ls=\"dotted\")\n #plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 4))\n plt.tight_layout()\n end_time = time.time()\n m, s = divmod(end_time - start_time, 60)\n h, m = divmod(m, 60)\n print('Tempo total: ', '%02d:%02d:%02d' % (h, m, s))\n print('FIM', datetime.datetime.now().time())\n plt.savefig('graph_dyna.png', dpi=100)\n plt.show()\n\n\n\nif __name__ == \"__main__\":\n # comparison\n # alpha_epsilon = combAlphaEp(alphas=[0.25, 0.5, 0.9], epsilons=[0.001, 0.5, 0.9])\n # exp_dyna_q(nr=12, buckets=(1, 1, 6, 12), n_episodes=10000, batch=100, n_steps=300, alpha_epsilon=alpha_epsilon, ada_divisor=0, gamma=1)\n \n # alpha_epsilon = combAlphaEp(alphas=[0.25, 0.3, 0.6, 0.8], epsilons=[0.0001, 0.3, 0.6, 0.8])\n # exp_dyna_q(nr=12, buckets=(1, 1, 6, 12), n_episodes=10000, batch=100, n_steps=300, alpha_epsilon=alpha_epsilon, ada_divisor=0, gamma=1)\n \n # alpha_epsilon = combAlphaEp(alphas=[0.1, 0.1, 0.1, 0.1], epsilons=[0.1, 0.1, 0.1, 0.1, 0.1])\n # exp_dyna_q(nr=12, buckets=(1, 1, 6, 12), n_episodes=10000, batch=100, n_steps=300,alpha_epsilon=alpha_epsilon, ada_divisor=25, gamma=1)\n \n alpha_epsilon= [(0.3,0.1)]\n exp_dyna_q(nr=30, buckets=(1, 1, 6, 12), n_episodes=10000, batch=100, n_steps=200,alpha_epsilon=alpha_epsilon, ada_divisor=0, gamma=1)\n\n # alpha_epsilon = combAlphaEp(alphas=[0.9], epsilons=[0.1])\n # exp_dyna_q(nr=31, buckets=(5, 5, 6, 12), n_episodes=10000, batch=100, n_steps=200,alpha_epsilon=alpha_epsilon, ada_divisor=0, gamma=1)\n\n # alpha_epsilon = combAlphaEp(alphas=[0.3, 0.0025], epsilons=[0.1, 0.5 ,0.9])\n # exp_dyna_q(nr=32, buckets=(1, 1, 2, 2), n_episodes=10000, batch=100, n_steps=200,alpha_epsilon=alpha_epsilon, 
ada_divisor=0, gamma=1)","sub_path":"Machine Learning/Reinforment Learning/6_sample_officient_dyna_q_discretize_cartpole.py","file_name":"6_sample_officient_dyna_q_discretize_cartpole.py","file_ext":"py","file_size_in_byte":16830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"637180237","text":"\"\"\" gather functions necessary to build an index \"\"\"\n\nimport re\nfrom typing import Optional, Tuple, Union\n\nimport faiss\nfrom autofaiss.external.metadata import IndexMetadata\nfrom autofaiss.datasets.readers.local_iterators import read_embeddings_local, read_shapes_local\nfrom autofaiss.datasets.readers.remote_iterators import read_embeddings_remote, read_filenames\nfrom autofaiss.external.optimize import (\n check_if_index_needs_training,\n compute_memory_necessary_for_training,\n get_optimal_batch_size,\n get_optimal_index_keys_v2,\n get_optimal_train_size,\n set_search_hyperparameters,\n)\nfrom autofaiss.indices.index_factory import index_factory\nfrom autofaiss.utils.cast import (\n cast_bytes_to_memory_string,\n cast_memory_to_bytes,\n to_faiss_metric_type,\n to_readable_time,\n)\nfrom autofaiss.utils.decorators import Timeit\n\n\ndef estimate_memory_required_for_index_creation(\n nb_vectors: int, vec_dim: int, index_key: Optional[str] = None, max_index_memory_usage: Optional[str] = None\n) -> Tuple[int, str]:\n \"\"\"\n Estimates the RAM necessary to create the index\n The value returned is in Bytes\n \"\"\"\n\n if index_key is None:\n if max_index_memory_usage is not None:\n index_key = get_optimal_index_keys_v2(nb_vectors, vec_dim, max_index_memory_usage)[0]\n else:\n raise ValueError(\"you should give max_index_memory_usage value if no index_key is given\")\n\n metadata = IndexMetadata(index_key, nb_vectors, vec_dim)\n\n index_memory = metadata.estimated_index_size_in_bytes()\n needed_for_adding = min(index_memory * 0.1, 10 ** 9)\n index_overhead = index_memory * 0.1\n\n index_needs_training = check_if_index_needs_training(index_key)\n\n if index_needs_training:\n # Compute the smallest number of vectors required to train the index given\n # the maximal memory constraint\n nb_vectors_train = get_optimal_train_size(nb_vectors, index_key, \"1K\", vec_dim)\n\n memory_for_training = (\n compute_memory_necessary_for_training(nb_vectors_train, index_key, vec_dim) + index_memory * 0.25\n )\n else:\n memory_for_training = 0\n\n return (int(index_overhead + max(index_memory + needed_for_adding, memory_for_training))), index_key\n\n\ndef get_estimated_download_time_infos(\n embeddings_hdfs_path: str, bandwidth_gbytes_per_sec: float = 1.0, indent: int = 0\n) -> Tuple[str, Tuple[int, int]]:\n \"\"\"\n Gives a general approximation of the download time (and preprocessing time) of embeddings\n \"\"\"\n nb_vectors_approx, vec_dim = get_nb_vectors_approx_and_dim_from_hdfs(embeddings_hdfs_path)\n\n size = 4 * nb_vectors_approx * vec_dim\n\n download = 1.1 * size / (bandwidth_gbytes_per_sec * 1024 ** 3) # seconds\n preprocess = 1.6 * download # seconds\n\n infos = (\n f\"-> Download: {to_readable_time(download, rounding=True)}\\n\"\n f\"-> Preprocess: {to_readable_time(preprocess, rounding=True)}\\n\"\n f\"Total: {to_readable_time(download + preprocess, rounding=True)}\"\n \" (< 1 minute if files are already cached)\"\n )\n\n tab = \"\\t\" * indent\n infos = tab + infos.replace(\"\\n\", \"\\n\" + tab)\n\n return infos, (nb_vectors_approx, vec_dim)\n\n\ndef get_estimated_construction_time_infos(nb_vectors: int, vec_dim: int, indent: int = 0) 
-> str:\n \"\"\"\n Gives a general approximation of the construction time of the index\n \"\"\"\n\n size = 4 * nb_vectors * vec_dim\n\n train = 1000 # seconds, depends on the number of points for training\n add = 450 * size / (150 * 1024 ** 3) # seconds, Linear approx (450s for 150GB in classic conditions)\n\n infos = (\n f\"-> Train: {to_readable_time(train, rounding=True)}\\n\"\n f\"-> Add: {to_readable_time(add, rounding=True)}\\n\"\n f\"Total: {to_readable_time(train + add, rounding=True)}\"\n )\n tab = \"\\t\" * indent\n infos = tab + infos.replace(\"\\n\", \"\\n\" + tab)\n\n return infos\n\n\ndef get_nb_vectors_approx_and_dim_from_hdfs(parquet_embeddings_path: str) -> Tuple[int, int]:\n \"\"\"legacy function to give the dimensions of a parquet file\n Still useful for tests\"\"\"\n\n # Get information for one partition\n avg_batch_length, vec_dim = next(read_embeddings_remote(parquet_embeddings_path, verbose=False)).shape\n\n # Count the number of files\n nb_files = len(read_filenames(parquet_embeddings_path))\n\n nb_vectors_approx = nb_files * avg_batch_length\n\n return nb_vectors_approx, vec_dim\n\n\ndef get_nb_vectors_and_dim(embeddings_path: str) -> Tuple[int, int]:\n \"\"\"\n Function that gives the total shape of the embeddings array\n \"\"\"\n\n tot_vec = 0\n vec_dim = -1\n\n for shape in read_shapes_local(embeddings_path):\n batch_length, dim = shape\n tot_vec += batch_length\n vec_dim = dim\n\n return tot_vec, vec_dim\n\n\ndef build_index(\n embeddings_path: str,\n index_key: str,\n metric_type: Union[str, int],\n nb_vectors: int,\n current_memory_available: str,\n use_gpu: bool = False,\n):\n \"\"\"\n Function that returns an index on the numpy arrays stored on disk in the embeddings_path path.\n \"\"\"\n\n # Instanciate the index\n with Timeit(f\"-> Instanciate the index {index_key}\", indent=2):\n\n # Convert metric_type to faiss type\n metric_type = to_faiss_metric_type(metric_type)\n\n # Get information for one partition\n _, vec_dim = next(read_shapes_local(embeddings_path))\n\n # Instanciate the index\n index = index_factory(vec_dim, index_key, metric_type)\n\n metadata = IndexMetadata(index_key, nb_vectors, vec_dim)\n\n print(\n f\"The index size will be approximately {cast_bytes_to_memory_string(metadata.estimated_index_size_in_bytes())}\"\n )\n\n index_needs_training = check_if_index_needs_training(index_key)\n\n if index_needs_training:\n\n # Extract training vectors\n with Timeit(\"-> Extract training vectors\", indent=2):\n\n memory_available_for_training = cast_bytes_to_memory_string(\n cast_memory_to_bytes(current_memory_available) - metadata.estimated_index_size_in_bytes() * 0.25\n )\n\n # Determine the number of vectors necessary to train the index\n train_size = get_optimal_train_size(nb_vectors, index_key, memory_available_for_training, vec_dim)\n memory_needed_for_training = compute_memory_necessary_for_training(train_size, index_key, vec_dim)\n print(\n f\"Will use {train_size} vectors to train the index, \"\n f\"that will use {cast_bytes_to_memory_string(memory_needed_for_training)} of memory\"\n )\n\n # Extract training vectors\n train_vectors = next(read_embeddings_local(embeddings_path, batch_size=train_size, verbose=True))\n\n # Instanciate the index and train it\n # pylint: disable=no-member\n if use_gpu:\n # if this fails, it means that the GPU version was not comp.\n assert (\n faiss.StandardGpuResources\n ), \"FAISS was not compiled with GPU support, or loading _swigfaiss_gpu.so failed\"\n res = faiss.StandardGpuResources()\n dev_no = 0\n # 
transfer to GPU (may be partial).\n index = faiss.index_cpu_to_gpu(res, dev_no, index)\n\n with Timeit(\n f\"-> Training the index with {train_vectors.shape[0]} vectors of dim {train_vectors.shape[1]}\", indent=2\n ):\n index.train(train_vectors)\n\n del train_vectors\n\n memory_available_for_adding = cast_bytes_to_memory_string(\n cast_memory_to_bytes(current_memory_available) - metadata.estimated_index_size_in_bytes()\n )\n\n print(\n f\"The memory available for adding the vectors is {memory_available_for_adding}\"\n \"(total available - used by the index)\"\n )\n print(\"Will be using at most 1GB of ram for adding\")\n # Add the vectors to the index.\n with Timeit(\"-> Adding the vectors to the index\", indent=2):\n batch_size = get_optimal_batch_size(vec_dim, memory_available_for_adding)\n print(\n f\"Using a batch size of {batch_size} (memory overhead {cast_bytes_to_memory_string(batch_size*vec_dim*4)})\"\n )\n for vec_batch in read_embeddings_local(embeddings_path, batch_size=batch_size, verbose=True):\n index.add(vec_batch)\n\n # Give standard values for index hyperparameters if possible.\n if any(re.findall(r\"OPQ\\d+_\\d+,IVF\\d+_HNSW\\d+,PQ\\d+\", index_key)):\n set_search_hyperparameters(index, f\"nprobe={64},efSearch={128},ht={2048}\", use_gpu)\n\n # return the index.\n return index\n","sub_path":"autofaiss/external/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":8591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"142915354","text":"from flask import Flask\nfrom flask_restful import Resource, Api\nfrom flask_cors import CORS\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import roc_curve\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\nimport pandas as pd\nimport numpy as np\n\napp = Flask(__name__)\napi = Api(app)\nCORS(app)\n\nclass ROC(Resource):\n \n def get(self, preprocessing, c):\n # you need to preprocess the data according to user preferences (only fit preprocessing on train data)\n # fit the model on the training set\n # predict probabilities on test set\n if preprocessing=='min-max':\n scaler = MinMaxScaler()\n elif preprocessing=='standardization':\n scaler = StandardScaler()\n else:\n return {'error':'choose min-max or standardization for processing'}\n scaler.fit(X_train)\n X_trainScaled = scaler.transform(X_train)\n X_testScaled = scaler.transform(X_test)\n clf = LogisticRegression(C=c)\n clf.fit(X_trainScaled,y_train)\n y_proba = clf.predict_proba(X_testScaled)\n\n fpr, tpr, thres = roc_curve(y_true=y_test,y_score=y_proba[:,1])\n \n res = [{'fpr':fpr[i].item(),'tpr':tpr[i].item(),'threshold':thres[i].item()} for i in range(len(thres))]\n return res\n\napi.add_resource(ROC,'/&')\n# Here you need to add the ROC resource, ex: api.add_resource(HelloWorld, '/')\n# for examples see \n# https://flask-restful.readthedocs.io/en/latest/quickstart.html#a-minimal-api\n\nif __name__ == '__main__':\n # load data\n df = pd.read_csv('data/transfusion.data')\n df = df.rename(columns={'whether he/she donated blood in March 2007':'Donated'})\n\n xDf = df.loc[:, df.columns != 'Donated']\n y = df['Donated']\n # get random numbers to split into train and test\n np.random.seed(1)\n r = np.random.rand(len(df))\n # split into train test\n X_train = xDf[r < 0.8]\n X_test = xDf[r >= 0.8]\n y_train = y[r < 0.8]\n y_test = y[r >= 0.8]\n 
app.run(debug=True)","sub_path":"flask_roc.py","file_name":"flask_roc.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"170036168","text":"from django.shortcuts import render\nfrom django.shortcuts import HttpResponse\nfrom .alignment import align\nfrom .models import Sequence\nfrom django.utils import timezone\n\ndef sequence_matcher(request):\n if request.method == 'POST':\n sequence = request.POST.get('info', None).upper()\n protein, index = align(sequence)\n if index == -1:\n protein = 'Not Found'\n index = ''\n seq, created = Sequence.objects.get_or_create(title = sequence, index = index, protein = protein)\n seq.timestamp = timezone.now()\n seq.save()\n data = Sequence.objects.all().order_by('timestamp').reverse()\n sequence_dict = {'sequences':data}\n return render(request, 'sequence_matcher.html', sequence_dict)\n","sub_path":"sequence_matcher/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"320399386","text":"import time\nfrom random import randint\nfrom threading import Thread, Lock, Event\n\nmutex1 = Lock()\nmutex2 = Lock()\n\n\nclass BarberShop:\n waitingCustomers = []\n\n def __init__(self, barber, barber2, numberOfSeats):\n self.barber = barber\n self.barber2 = barber2\n self.numberOfSeats = numberOfSeats\n\n def openShop(self):\n print('{0} has opened the Barbershop!'.format(self.barber.name))\n workingThread = Thread(target=self.barberGoToWork, args=(self.barber, mutex1))\n workingThread.start()\n workingThread2 = Thread(target=self.barberGoToWork, args=(self.barber2, mutex2))\n workingThread2.start()\n\n def barberGoToWork(self, barber, mutex):\n while True:\n mutex.acquire()\n\n if len(self.waitingCustomers) > 0:\n c = self.waitingCustomers[0]\n del self.waitingCustomers[0]\n mutex.release()\n barber.cutHair(c)\n else:\n mutex.release()\n print('{0} is sleeping...'.format(barber.name))\n barber.sleep()\n print('Customer-{0} has woken up {1}.'.format(self.waitingCustomers[0], barber.name))\n\n def enterBarberShop(self, customer):\n\n # If there is no one in the barber1 chair, SET barber = barber1\n if self.barber.barberChair == False:\n mutex1.acquire()\n mutex = mutex1\n barber = self.barber\n # If there is no one in the barber2 chair, SET barber = barber2\n elif self.barber2.barberChair == False:\n mutex2.acquire()\n mutex = mutex2\n barber = self.barber2\n # If both barber are busy, set barber = barber1\n else:\n mutex1.acquire()\n mutex = mutex1\n barber = self.barber\n\n # If no one is sitting on barber chair\n if barber.barberChair == False:\n self.waitingCustomers.append(customer)\n mutex.release()\n barber.wakeUp()\n # No seats in waiting room\n elif len(self.waitingCustomers) == self.numberOfSeats:\n print('Barbershop is full, Customer-{0} has left.'.format(customer))\n mutex.release()\n # If someone is in the barber chair and there are seats left in the waiting room\n else:\n print('Both barbers are busy, Customer-{1} is waiting on chair-{2}.'.format(barber.name, customer,\n len(self.waitingCustomers)))\n self.waitingCustomers.append(customer)\n mutex.release()\n barber.wakeUp()\n\n\nclass Barber:\n\n def __init__(self, name):\n self.name = name\n self.barberWorkingEvent = Event()\n self.barberChair = False # Assigns barber chair as occupied (True) or unoccupied (False)\n\n def sleep(self):\n self.barberWorkingEvent.wait()\n\n def 
wakeUp(self):\n # awaken the thread\n self.barberWorkingEvent.set()\n\n def cutHair(self, customer):\n # Set barber as busy\n self.barberWorkingEvent.clear()\n\n print('Customer-{0} is sitting in {1}\\'s chair.'.format(customer, self.name))\n print('{0} is cutting Customer-{1}\\'s hair.'.format(self.name, customer))\n self.barberChair = True\n durationOfHaircut = randint(5, 25)\n time.sleep(durationOfHaircut)\n print('Customer-{0}\\'s haircut is completed in {1} seconds by {2}.'.format(customer, durationOfHaircut,\n self.name))\n self.barberChair = False\n\n\ndef generate_random_number():\n while len(customers) > 0:\n if randint(10 ** 5 + 1, 10 ** 6) % 4 == 0:\n # New customer enters the barbershop\n barberShop.enterBarberShop(customers.pop())\n time.sleep(1)\n\n\nif __name__ == '__main__':\n numberOfSeats = int(input(\"Enter number of regular chairs (N) : \"))\n\n # customer list\n customers = list(range(6))\n customers.reverse()\n\n SweenyTodd = Barber('Sweeny Todd')\n DavyCollins = Barber('Davy Collins')\n\n barberShop = BarberShop(SweenyTodd, DavyCollins, numberOfSeats)\n barberShop.openShop()\n\n # start random number generator thread\n rand_gen = Thread(target=generate_random_number)\n # allow main program to exit\n rand_gen.daemon = True\n # run thread\n rand_gen.start()\n","sub_path":"PA2/case4.py","file_name":"case4.py","file_ext":"py","file_size_in_byte":4433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"297434240","text":"import discord\nimport random\nimport datetime\nimport pytz\nimport mimetypes\n\nfrom constants import *\n\n\nutc = pytz.UTC\n\n\nclass HouseCupException(Exception):\n pass\n\n\ndef is_mod(user, channel):\n user_is_mod = user.permissions_in(channel).administrator\n role_names = [role.name.lower() for role in user.roles]\n mod_role = \"mod\" in role_names\n return user_is_mod or mod_role or user.id == STUFFLE_ID\n\n\nasync def get_channel_and_message(client, channel_id, message_id):\n channel = client.get_channel(channel_id)\n if not channel:\n raise HouseCupException(\n \"I can't find that channel. Please try again.\")\n\n message = None\n try:\n message = await channel.fetch_message(message_id)\n except Exception:\n raise HouseCupException(\"Invalid message ID. Please try again\")\n if not message:\n raise HouseCupException(\n \"I can't find that message. 
Please try again.\")\n\n    return channel, message\n\n\nasync def pick_winner(text, client):\n    args = text.split()[1:]\n    proper_format = \"Proper formatting for this function is `~pickwinner MESSAGE_ID CHANNEL_ID`\"\n    if len(args) != 2:\n        raise HouseCupException(proper_format)\n    if not args[0].isdigit() or not args[1].isdigit():\n        raise HouseCupException(proper_format)\n\n    message_id = int(args[0])\n    channel_id = int(args[1])\n\n    channel, message = await get_channel_and_message(client, channel_id, message_id)\n\n    unique_users = []\n    reactions = message.reactions\n    for reaction in reactions:\n        users = await reaction.users().flatten()\n        for user in users:\n            if user not in unique_users:\n                unique_users.append(user)\n\n    if len(unique_users) == 0:\n        return \"No one reacted to that message, so there is no winner.\"\n\n    winner = random.choice(unique_users).name\n    return \"The winner is %s!\" % winner\n\n\nasync def delete_history(client, message, all_history=True):\n    topic_keyword = \"~deletesomehistory exempt\"\n\n    channel = message.channel\n    if not is_mod(message.author, channel):\n        raise HouseCupException(\"Only mods may run this command.\")\n\n    if len(message.mentions) != 1:\n        raise HouseCupException(\n            \"Mention one user to delete their history.\")\n    member = message.mentions[0]\n    mention = member.mention\n    print(\"Running deletehistory for %s\" % mention)\n\n    command_str = \"all\"\n    if not all_history:\n        command_str = \"some\"\n    explanation_str = \"This will delete *every* message by %s.\" % mention\n    if not all_history:\n        explanation_str = \"This will delete every message by %s, except messages that are pinned or in channels with `%s` in the topic.\" % (\n            mention, topic_keyword)\n\n    await channel.send(\n        \"Running delete %s history for %s. %s\\n\"\n        \"I'll let you know when it's complete. \"\n        \"This could take a while.\" % (\n            command_str, mention, explanation_str))\n\n    for channel in message.guild.text_channels:\n        save_in_delete_some = channel.topic and (topic_keyword in channel.topic)\n        if all_history or not save_in_delete_some:\n            try:\n                delete_check = lambda msg: msg.author.id == member.id\n                if not all_history:\n                    delete_check = lambda msg: msg.author.id == member.id and not msg.pinned\n                await channel.purge(\n                    limit=None,\n                    check=delete_check)\n            except Exception as ex:\n                print(\"Unable to purge %s.\" % channel.name)\n                print(str(ex))\n    print(\"Deleted history for %s\" % member.name)\n    msg = \"Finished running delete history for %s.\" % mention\n    return msg\n\n\nasync def clear_channel_now(client, message):\n    channel = message.channel\n    if not is_mod(message.author, channel):\n        raise HouseCupException(\"Only mods may run this command.\")\n    await channel.send(\n        \"Deleting all messages in this channel that aren't pinned...\")\n\n    await channel.purge(\n        limit=None,\n        check=lambda msg: not msg.pinned)\n\n    return \"Deleted non-pinned messages in this channel!\"\n","sub_path":"mod.py","file_name":"mod.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"140699412","text":"class Plate():\n    \n    def __init__(self, plate_number, alpha_level = 50, resistance_value = 100, progressive_decay = False):\n        \n        ## Initialize parameters.\n        # - Plate ID number.\n        self.plate_number = plate_number\n        if not type(plate_number) == type(int(1)):\n            raise Exception(\"Plate number parameter should be an integer.\")\n        \n        # - Maximal level at which the plate breaks.\n        self.alpha_level = alpha_level\n        \n        # - Current resistance value, used in progressive decay.\n        self.resistance_value = resistance_value\n        \n        # - Initial resistance value, used in progressive decay.\n        self.initial_resistance = resistance_value\n        \n        # - Progressive decay boolean.\n        self.progressive_decay = progressive_decay\n        if not type(progressive_decay) == type(True):\n            raise Exception(\"Progressive decay parameter should be a boolean.\")\n        \n        # - Broken/Unbroken status for plate.\n        self.broken = False\n        \n    \n    def throw_from_level(self, level):\n        \n        if not type(level) == type(1):\n            raise Exception(\"Level parameter should be an integer!\")\n        \n        if self.broken:\n            # If already broken, raise exception.\n            raise Exception(\"Cannot throw a broken plate!\")\n        \n        elif not self.progressive_decay:\n            # If no progressive decay is enabled, the plate breaks if it is thrown from a level higher than its limit.\n            self.broken = self.alpha_level <= level\n            self.resistance_value = int(not self.broken)*self.resistance_value\n        \n        elif self.progressive_decay:\n            # If progressive decay is enabled, the plate breaks if its resistance value drops to 0\n            # The resistance value decreases by a percentage corresponding to the ratio between the \n            # plate limit and the thrown level.\n            self.broken = level >= self.alpha_level*self.resistance_value/self.initial_resistance\n            self.resistance_value = max(0, self.resistance_value - self.initial_resistance*level/self.alpha_level)\n        \n        return self.broken","sub_path":"Plate.py","file_name":"Plate.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"180003549","text":"from sklearn import tree\nimport numpy as np\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets('MNIST_data',one_hot = False)\ntrain_num = 60000\ntest_num = 10000\n\n# fetch the training and test data\nx_train = mnist.train.images\ny_train = mnist.train.labels\nx_test = mnist.test.images\ny_test = mnist.test.labels\n\nif __name__ == '__main__':\n    # build a decision tree classifier\n    clf = tree.DecisionTreeClassifier()\n    # fit the classifier on the training data\n    clf.fit(x_train[:train_num],y_train[:train_num])\n    # predict on the test data\n    prediction = clf.predict(x_test[:test_num])\n    # measure how well the predictions match the true labels\n    accuracy = np.sum(np.equal(prediction,y_test[:test_num])) / test_num\n\n    print('accuracy:',accuracy)","sub_path":"dt.py","file_name":"dt.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"20931306","text":"from django.conf.urls import url, include\nfrom blog import views\n\nurlpatterns = [\n\turl(r'^$', views.index, name='index'),\n    url(r'^posts/$', views.post_list, name='post_list'),\n    url(r'^posts/(?P\\d+)$', views.post_detail, name='post_detail'),\n    url(r'^posts/new/$', views.post_new, name='post_new'),\n    url(r'^posts/edit/(?P\\d+)$', views.post_edit, name='post_edit'),\n    url(r'^posts/comments/new/(?P\\d+)$', views.comment_new, name='comment_new'),\n    url(r'^posts/comments/edit/(?P\\d+)/(?P\\d+)$', views.comment_edit, name='comment_edit'),\n\n    url(r'^api/v1/', include('blog.api.v1', namespace='api_v1')),\n]","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"626021505","text":"from django.conf.urls import url\nfrom . import views\n\napp_name = 'project'\n\nurlpatterns = [\n\n    url(r'^$', views.index, name='index'),\n    url(r'^projects/$', views.projects, name='projects'),\n    url(r'^applications/$', views.applications, name='applications'),\n    url(r'^register/$', views.register, name='register'),\n    url(r'^login_user/$', views.login_user, name='login_user'),\n    url(r'^logout_user/$', views.logout_user, name='logout_user'),\n    url(r'^(?P[0-9]+)/$',\n        views.project_detail, name='project_detail'),\n    url(r'^(?P[0-9]+)/application_detail/(?P[0-9]+)/$',\n        views.application_detail, name='application_detail'),\n    url(r'^(?P[0-9]+)/report/$', views.report, name='report'),\n    url(r'^create_project/$', views.create_project, name='create_project'),\n    url(r'^(?P[0-9]+)/delete_project/$',\n        views.delete_project, name='delete_project'),\n    url(r'^(?P[0-9]+)/create_application/$',\n        views.create_application, name='create_application'),\n    url(r'^(?P[0-9]+)/delete_application/(?P[0-9]+)/$',\n        views.delete_application, name='delete_application'),\n    url(r'^setChecklist/$', views.setChecklist, name='setChecklist'),\n    url(r'^getChecklist/$', views.getChecklist, name='getChecklist'),\n    url(r'^setReportlist/$', views.setReportlist, name='setReportlist'),\n    url(r'^getReportlist/$', views.getReportlist, name='getReportlist'),\n]\n","sub_path":"project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"472557446","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('store', '0001_initial'),\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='BuyerDetail',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('phone', models.CharField(max_length=20)),\n                ('company_name', models.CharField(max_length=100)),\n                ('buyer', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n        migrations.CreateModel(\n            name='Enquiry',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('product_or_subject', models.CharField(max_length=100)),\n                ('description', models.CharField(max_length=500)),\n                ('ask_trade', models.BooleanField(default=False)),\n                ('status', models.CharField(default=1, max_length=2)),\n                ('attachement', models.FileField(max_length=200, null=True, upload_to=b'', blank=True)),\n                ('country', models.CharField(max_length=100)),\n                ('city', models.CharField(max_length=100)),\n                ('read',
models.BooleanField(default=False)),\n # ('is_quote', models.BooleanField(default=False)),\n # ('created', models.DateTimeField(auto_now_add=True)),\n # ('enquiry', models.ForeignKey(to='dashboard.Enquiry')),\n # ('replier', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n # ],\n # options={\n # },\n # bases=(models.Model,),\n # ),\n ]\n","sub_path":"dashboard/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"158643553","text":"from optimalContolProblem import *\n\nimport numpy as np\nfrom scipy.integrate import trapz\n\n\n\nclass GeneralPowerY(Problem1):\n \"\"\"\n class for the opti-problem:\n J(u,y) = 0.5*||u||**2 + 1/p*(y(T)-yT)**p\n with y' = ay + u\n \"\"\"\n\n def __init__(self,y0,yT,T,a,power,J,grad_J,options=None):\n Problem1.__init__(self,y0,yT,T,a,J,grad_J,options)\n self.power = power\n \n def J_func(u,y,yT,T):\n return J(u,y,yT,T,self.power)\n \n self.J = J_func\n\n def initial_adjoint(self,y):\n \n p = self.power\n return (y - self.yT)**(p-1)\n \nclass CubicY(Problem1):\n \"\"\"\n class for the opti-problem:\n J(u,y) = 0.5*||u||**2 + 1/3*(y(T)-yT)**3\n with y' = ay + u\n \"\"\"\n def __init__(self,y0,yT,T,a,J,grad_J,options=None):\n Problem1.__init__(self,y0,yT,T,a,J,grad_J,options)\n \n def initial_adjoint(self,y):\n \n p = 3\n return (y - self.yT)**(p-1)\n \nif __name__ == '__main__':\n\n from matplotlib.pyplot import *\n\n y0 = 1\n yT = 10\n T = 1\n a = 1\n P = 4\n N=700\n \n def J(u,y,yT,T,power):\n t = np.linspace(0,T,len(u))\n\n I = trapz(u**2,t)\n\n return (0.5*I + (1./power)*(y-yT)**power)\n\n def J2(u,y,yT,T):\n t = np.linspace(0,T,len(u))\n\n I = trapz(u**2,t)\n\n return 0.5*(I + (y-yT)**2)\n\n def grad_J(u,p,dt):\n return dt*(u+p)\n \n problem = GeneralPowerY(y0,yT,T,a,P,J,grad_J)\n problem2 = Problem1(y0,yT,T,a,J2,grad_J)\n\n res1 = problem.plot_solve(N,state=True)\n print \n res2 = problem2.plot_solve(N,state=True)\n\n t = np.linspace(0,T,N+1)\n\n\n plot(t,res1['control'].array())\n plot(t,res2['control'].array(),'r--')\n show()\n\n problem.simple_test(N)\n \n\"\"\"\nterminal> python cubicYfunc.py\n\n--------------m=1--------------\n|lbfgs memory=10| #iterations=9| #iterations/m=9.00\n--------------m=2--------------\n|lbfgs memory=10| #iterations=13| #iterations/m=6.50\n|lbfgs memory=30| #iterations=14| #iterations/m=7.00\n--------------m=4--------------\n|lbfgs memory=10| #iterations=18| #iterations/m=4.50\n|lbfgs memory=30| #iterations=17| #iterations/m=4.25\n--------------m=8--------------\n|lbfgs memory=10| #iterations=31| #iterations/m=3.88\n|lbfgs memory=30| #iterations=22| #iterations/m=2.75\n--------------m=16--------------\n|lbfgs memory=16| #iterations=-1| #iterations/m=-0.06\n|lbfgs memory=48| #iterations=40| #iterations/m=2.50\n--------------m=32--------------\n|lbfgs memory=32| #iterations=-1| #iterations/m=-0.03\n|lbfgs memory=96| #iterations=55| #iterations/m=1.72\n\n\n\"\"\"\n","sub_path":"adjoint/cubicYfunc.py","file_name":"cubicYfunc.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"500953926","text":"#!/usr/bin/env python3\n\nimport json\nimport argparse\nimport codecs\nimport os\nimport socket\nimport subprocess\n\n# Force encoding to UTF-8\nimport locale # Ensures that subsequent open()s\nlocale.getpreferredencoding = lambda _=None: 'UTF-8' # are UTF-8 encoded.\n\nimport sys\n#sys.stdin = 
open('/dev/stdin', 'r')\n#sys.stdout = open('/dev/stdout', 'w')\n#sys.stderr = open('/dev/stderr', 'w')\n\n# Utility functions for the announce.d files\ndef toUTF8(line):\n return line.decode(\"utf-8\")\n\ndef call(cmdnargs):\n output = subprocess.check_output(cmdnargs)\n lines = output.splitlines()\n lines = [toUTF8(line) for line in lines]\n return lines\n\n# Local used functions\ndef setValue(node,path,value):\n ''' Sets a value inside a complex data dictionary.\n The path Array must have at least one element.\n '''\n key = path[0]\n if len(path) == 1:\n node[key] = value;\n elif key in node:\n setValue(node[key],path[1:],value)\n else:\n node[path[0]] = {}\n setValue(node[key],path[1:],value)\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('-d', '--directory', action='store',\n help='structure directory',required=True)\n\nparser.add_argument('-b', '--batman', action='store',\n help='batman-adv device',default='bat0')\n\nargs = parser.parse_args()\n\noptions = vars(args)\n\ndirectory = options['directory']\nbatadv_dev = options['batman']\n\ndata = {}\n\nfor dirname, dirnames, filenames in os.walk(directory):\n for filename in filenames:\n if filename[0] != '.':\n relPath = os.path.relpath(dirname + os.sep + filename,directory);\n fh = open(dirname + os.sep + filename,'r', errors='replace')\n source = fh.read()\n fh.close()\n value = eval(source)\n setValue(data,relPath.rsplit(os.sep),value)\nprint(json.dumps(data))\n","sub_path":"announce.py","file_name":"announce.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"108856143","text":"class Solution(object):\r\n def findMedianSortedArrays(self, nums1, nums2):\r\n l1, l2 = len(nums1), len(nums2)\r\n if (l1 + l2) % 2 == 1: # odd\r\n return self.getKth(nums1, nums2, int((l1 + l2) / 2) + 1)\r\n else: # even\r\n return (self.getKth(nums1, nums2, int((l1 + l2) / 2)) + \\\r\n self.getKth(nums1, nums2, int((l1 + l2) / 2) + 1)) * 0.5\r\n\r\n def getKth(self, A, B, k):\r\n m, n = len(A), len(B)\r\n if m > n:\r\n return self.getKth(B, A, k)\r\n\r\n l, r = 0, m\r\n while l < r:\r\n mid = int(l + (r - l) / 2)\r\n if 0 <= k - 1 - mid < n and A[mid] >= B[k - 1 - mid]:\r\n r = mid\r\n else:\r\n l = mid + 1\r\n\r\n A1 = A[l - 1] if l - 1 >= 0 else float(\"-inf\")\r\n B1 = B[k - 1 - l] if k - 1 - l >= 0 else float(\"-inf\")\r\n\r\n return max(A1, B1)\r\n\r\n\r\n#print(Solution().findMedianSortedArrays([1, 3], [2]))\r\n","sub_path":"FLAG/4_median_of_two_sorted_arrays.py","file_name":"4_median_of_two_sorted_arrays.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"540789332","text":"\r\n## This process extract CDS information from the contig file\r\n\r\nimport sys, os, re, time\r\nimport numpy as np\r\nfrom multiprocessing import Pool\r\nfrom multiprocessing.pool import ThreadPool\r\n\r\nlistfile, infolder, outfolder = sys.argv[1:]\r\n\r\nstart = time.time()\r\n\r\nmapping = {}\r\nwith open(listfile, 'r') as f: \r\n for line in f:\r\n c = line.strip().rsplit(None,1)\r\n if c[1] not in mapping:\r\n mapping[c[1]] = []\r\n mapping[c[1]].append(c[0])\r\n\r\nprint('%.6f' % (time.time() - start))\r\n\r\n \r\n\r\nre_pattern_complement = re.compile('complement')\r\nre_pattern_join = re.compile('join')\r\nre_pattern_order = re.compile('order')\r\nre_pattern_number = re.compile('\\d+')\r\nre_pattern_segment = re.compile('\\d+\\.\\.\\d+')\r\nre_pattern_singlesite 
= re.compile('(?:[^\\d\\.]|^)(\\d+)(?=[^\\d\\.]|$)')\r\n\r\ninfiles = list([_ for _ in os.listdir(infolder) if not _.endswith('.log') and os.path.isfile(os.path.join(infolder,_))])\r\n\r\ndef checkLoc(loc):\r\n\t\"\"\"\r\n\tThis function uses a very lazy criteria\r\n\tIn the future, I'd NOT like to update it, because it needs to PARSE the string\r\n\t:param loc:\r\n\t:return:\r\n\t\"\"\"\r\n\r\n\tglobal checkLocFlag\r\n\r\n\tpos_complement = list(re_pattern_complement.finditer(loc))\r\n\tpos_join = list(re_pattern_join.finditer(loc))\r\n\tpos_order = list(re_pattern_order.finditer(loc))\r\n\t# loc = loc.replace('>', '').replace('<', '')\r\n\t# content_number = np.array(map(int, re_pattern_number.findall(loc)))\r\n\r\n\terrList = [\r\n\t\tpos_order,\r\n\t\tlen(pos_complement) > 1,\r\n\t\tlen(pos_join) > 1,\r\n\t\tloc.count(':'),\r\n\t\tpos_join and pos_complement and pos_join[0].start() < pos_complement[0].start(),\r\n\t\t# len(content_number) % 2 == 0,\r\n\t\t# any(content_number[1:] > content_number[:-1]),\r\n\t\t# any(content_number[1:] == content_number[:-1]),\r\n\t\t# any(content_number[::2] > content_number[1::2]),\r\n\t\t# any(content_number[::2] == content_number[1::2]),\r\n\t]\r\n\terrbit = '1'\r\n\tpassbit = ' '\r\n\tcheckLocFlag = ''.join([errbit if _ else passbit for _ in errList])\r\n\r\n\treturn not checkLocFlag.count(errbit)\r\n\r\ndef loadContig(content, i):\r\n\tif i < len(content) and content[i].startswith('ID '): return i+1, content[i][5:].split(';', 1)[0]\r\n\treturn i, ''\r\ndef loadLoc(content, i):\r\n\tif i >= len(content): return i, ''\r\n\tif not content[i].startswith('FT CDS '): return i, ''\r\n\tloc = content[i][21:]; i = i+1\r\n\twhile i < len(content) and content[i].startswith('FT ') and content[i][21] != '/': loc += content[i][21:]; i += 1\r\n\treturn i, loc\r\ndef loadQualifier(content, i):\r\n\tdb_xref_uniprot = []\r\n\tprotein_id = []\r\n\twhile i < len(content):\r\n\t\tc = content[i]\r\n\t\tif c.startswith('FT /db_xref=\"UniProtKB/'):\r\n\t\t\tdb_xref_uniprot.append(c[c.find(':')+1:c.rfind('\"')])\r\n\t\telif c.startswith('FT /protein_id=\"'):\r\n\t\t\tc = c[c.find('\"')+1:-1]\r\n\t\t\tindex = c.find('.')\r\n\t\t\tif index: c = c[:index]\r\n\t\t\tprotein_id.append(c)\r\n\t\telse: break\r\n\t\ti += 1\r\n\treturn i, db_xref_uniprot, protein_id\r\n\r\ndef parseLoc(loc):\r\n\tif re_pattern_complement.search(loc):\r\n\t\tstrand = '-'\r\n\telse:\r\n\t\tstrand = '+'\r\n\r\n\tloc = loc.replace('>', '').replace('<', '')\r\n\tpos = list(map(int, re_pattern_number.findall(loc)))\r\n\tsegments = re_pattern_segment.findall(loc)\r\n\tsinglesites = list(map(int, re_pattern_singlesite.findall(loc)))\r\n\tflag = len(pos) == len(segments) * 2 + len(singlesites)\r\n\tif not flag: print(loc, file=sys.stderr)\r\n\tassert flag\r\n\r\n\tlength = len(singlesites) + len(segments)\r\n\tsegments = [__ for _ in segments for __ in _.split('..')]\r\n\tsegments = list(map(int, segments))\r\n\tlength += sum(e - s for s, e in zip(segments[::2], segments[1::2]))\r\n\r\n\tstart = min(pos)\r\n\tend = max(pos)\r\n\r\n\treturn list(map(str, [strand, start, end, length]))\r\n\r\n# maxloc = 0\r\n# for nf, infile in enumerate(infiles[120:]):\r\ndef func(argss):\r\n\tnf, infile = argss\r\n\tprint('%d\\t%s' % (nf, infile))\r\n\r\n\twith open(infolder + '/' + infile, 'r') as f: content = f.read().strip().split('\\n')\r\n\r\n\ti = 0\r\n\twith open(outfolder + '/' + infile, 'w') as f:\r\n\t\twhile i < len(content):\r\n\t\t\ti, contig = loadContig(content, i)\r\n\t\t\tif contig == None: 
break\r\n\t\t\tif not contig: print('ERROR null contig\\t%s' % (infile), file=sys.stderr); return;\r\n\t\t\twhile True:\r\n\t\t\t\ti, loc = loadLoc(content, i)\r\n\t\t\t\tif not loc: break\r\n\t\t\t\t# if len(loc) > maxloc: maxloc = len(loc); print '%d\\t%s' % (maxloc, loc)\r\n\t\t\t\ti, db_xref_uniprot, protein_id = loadQualifier(content, i)\r\n\t\t\t\tif not protein_id: print('ERROR no protein_id\\t%s\\t%d' % (infile, i), file=sys.stderr); continue\r\n\t\t\t\tif len(db_xref_uniprot) > 1: print('WARNING multiple xref\\t%s\\t%d\\t%d' % (infile, i, len(db_xref_uniprot)), file=sys.stderr); continue\r\n\t\t\t\tif len(protein_id) > 1: print('WARNING multiple protein_id\\t%s\\t%d\\t%d' % (infile, i, len(protein_id)), file=sys.stderr); continue\r\n\t\t\t\tif not checkLoc(loc): print('WARNING ill loc\\t%s\\t%d\\t%s\\t%s' % (infile, i, checkLocFlag, loc), file=sys.stderr); continue\r\n\t\t\t\tstrand, start, end, length = parseLoc(loc)\r\n\t\t\t\tfor pro_id in protein_id:\r\n\t\t\t\t\tf.write('\\n'.join('\\t'.join([_, contig + strand, start, end, length]) for _ in mapping.get(pro_id, [])) + '\\n')\r\n\r\n\tprint('%d\\t%s done' % (nf, infile))\r\n# print maxloc\r\npool = Pool(10)\r\npool.map(func, enumerate(infiles))\r\npool.close()\r\npool.join()\r\n# func([0, infiles[0]])\r\n# func([0, infiles[100]])\r\n\r\nprint('coverage')\r\nfor prefix in ['contig', 'wgs']:\r\n\tprint(prefix)\r\n\tfile_start, file_end = [], []\r\n\tfor infile in infiles:\r\n\t\tt = infile.split('_')\r\n\t\tif t[0] != prefix: continue\r\n\t\tfile_start.append(int(t[1]))\r\n\t\tfile_end.append(int(t[2]))\r\n\tfile_start, file_end = list(map(set, [file_start, file_end]))\r\n\tintersection = file_start & file_end\r\n\tfile_start -= intersection\r\n\tfile_end -= intersection\r\n\tfile_start, file_end = list(map(sorted, list(map(list, [file_start, file_end]))))\r\n\tfor b in zip(file_start, file_end):\r\n\t\tprint('\\t'.join(map(str, b)))\r\n","sub_path":"Genomic_distance_library_generating/source_file/util/extract_CDS_parallel.py","file_name":"extract_CDS_parallel.py","file_ext":"py","file_size_in_byte":5815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"528137546","text":"# Given a binary tree, return the bottom-up level order traversal of its nodes' values. \n# (ie, from left to right, level by level from leaf to root).\n\nfrom typing import List\nimport TreeNode\n\ndef dfs(root: TreeNode, level: int, result: List[int]) -> None:\n if root:\n if len(result) < level + 1:\n result.insert(0, [])\n result[len(result) - 1 - level].append(root.val)\n dfs(root.left, level + 1, result)\n dfs(root.right, level + 1, result)\n\ndef levelTraversal(root: TreeNode) -> List[List[int]]:\n result = []\n dfs(root, 0,result)\n return result","sub_path":"Easy/107. BSTLevelTraversal.py","file_name":"107. 
BSTLevelTraversal.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"627069954","text":"\"\"\"\nUtilities for creating and sending emails with Google's API.\n\"\"\"\nimport base64\nimport os\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.image import MIMEImage\nfrom email.mime.audio import MIMEAudio\nfrom email.mime.base import MIMEBase\nimport mimetypes\nfrom apiclient import errors\nfrom oauth2client import file as oauth_file, client, tools\n\n\ndef create_message_with_attachment(sender, to, subject, message_text, file):\n    \"\"\"Create a message for an email.\n\n    Args:\n        sender: Email address of the sender.\n        to: Email address of the receiver.\n        subject: The subject of the email message.\n        message_text: The text of the email message.\n\n    Returns:\n        An object containing a base64url encoded email object.\n    \"\"\"\n    message = MIMEMultipart()\n    message['to'] = to\n    message['from'] = sender\n    message['subject'] = subject\n\n    msg = MIMEText(message_text)\n    message.attach(msg)\n\n    content_type, encoding = mimetypes.guess_type(file)\n\n    if content_type is None or encoding is not None:\n        content_type = 'application/octet-stream'\n    main_type, sub_type = content_type.split('/', 1)\n    if main_type == 'text':\n        fp = open(file, 'rb')\n        msg = MIMEText(fp.read(), _subtype=sub_type)\n        fp.close()\n    elif main_type == 'image':\n        fp = open(file, 'rb')\n        msg = MIMEImage(fp.read(), _subtype=sub_type)\n        fp.close()\n    elif main_type == 'audio':\n        fp = open(file, 'rb')\n        msg = MIMEAudio(fp.read(), _subtype=sub_type)\n        fp.close()\n    else:\n        fp = open(file, 'rb')\n        msg = MIMEBase(main_type, sub_type)\n        msg.set_payload(fp.read())\n        fp.close()\n\n    filename = os.path.basename(file)\n    msg.add_header('Content-Disposition', 'attachment', filename=filename)\n    message.attach(msg)\n    return {'raw': base64.urlsafe_b64encode(message.as_string().encode()).decode()}\n\n\ndef send_message(service, user_id, message):\n    \"\"\"Send an email message.\n\n    Args:\n        service: Authorized Gmail API service instance.\n        user_id: User's email address. The special value \"me\"\n        can be used to indicate the authenticated user.\n        message: Message to be sent.\n\n    Returns:\n        Sent Message.\n    \"\"\"\n    try:\n        message = (service.users().messages().send(userId=user_id, body=message)\n                   .execute())\n        print('Message Id: %s' % message['id'])\n        return message\n    except (errors.HttpError) as error:\n        print('An error occurred: %s' % error)\n\n\ndef get_creds(tokenfile, credsfile, scopes='https://www.googleapis.com/auth/gmail.send'):\n    \"\"\"\n    Either retrieve stored token or get authorization.\n    \"\"\"\n    store = oauth_file.Storage(tokenfile)\n    creds = store.get()\n    if not creds or creds.invalid:\n        flow = client.flow_from_clientsecrets(credsfile, scopes)\n        creds = tools.run_flow(flow, store)\n    return creds\n","sub_path":"cvac/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"43661112","text":"from socket import *\n\nfrom time import sleep, ctime\n\ns = socket()\ns.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\ns.bind(('127.0.0.1', 8888))\ns.listen(3)\n\n# set a timeout on the socket\ns.settimeout(5)\n\nwhile True:\n    print(\"waiting for connection...\")\n    try:\n        c, addr = s.accept()\n    except timeout: # timeout\n        print(ctime())\n        continue\n    else:\n        print(\"connect from :\", addr)\n    while True:\n        data = c.recv(1024).decode()\n        if not data:\n            break\n        print(data)\n        c.send(ctime().encode())\n    c.close()\n    s.close()\n","sub_path":"pythonNet/day3/timeout.py","file_name":"timeout.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"632681362","text":"# import argparse\n\"\"\"\n    ASCII Encoder\n\"\"\"\n\ndef to_ascii(string):\n    \"\"\"\n    Converts a string into its ascii value\n    \"\"\"\n    out = \"\"\n\n    for char in string:\n        ascii = ord(char)\n        ascii = bin(ascii)\n        char = ascii[2:].zfill(8)\n        out = out + char\n\n    return out\n\ndef to_str(string):\n    \"\"\"\n    Converts ascii to string literal\n    \"\"\"\n    out = \"\"\n\n    for i in range(0, len(string), 8):\n        ascii = int(string[i:i+8], 2)\n        out = out + chr(ascii)\n\n    return out\n\ndef encode(filename):\n    \"\"\"\n    Encode a file\n    \"\"\"\n    file_in = open(filename, 'r+')\n    contents = file_in.readlines()\n\n    for line in contents:\n        file_in.write(to_ascii(line))\n\n    file_in.close()\n    \ndef main():\n    \"\"\"\n    Main function\n    \"\"\"\n    ''' parser = argparse.ArgumentParser()\n    encoder = parser.add_mutually_exclusive_group()\n\n    encoder.add_argument(\"-e\", action=\"store_true\", help=\"encode flag\")\n    encoder.add_argument(\"-d\", action=\"store_true\", help=\"decode flag\")\n\n    # group.add_argument(\"-v\", \"--verbose\", action=\"store_true\")\n    # group.add_argument(\"-q\", \"--quiet\", action=\"store_true\")\n\n    parser.add_argument(\"file\", type=str, help=\"file to encode/decode\")\n\n    base = parser.add_argument_group(\"required arguments\")\n    base.add_argument(\"-b\", type=int, required=True,\n                      help=\"number base to encode/decode\")\n\n    args = parser.parse_args()\n    '''\n\n    file_in = open(\"test.txt\", \"r+\")\n    content = file_in.readlines()\n    \n    file_in.seek(0)\n    file_in.truncate()\n    \n    for line in content:\n        # file_in.write(to_ascii(line))\n        file_in.write(to_str(line))\n\n    file_in.close()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"10576352","text":"#! /usr/bin/env python\n# -*- coding=utf8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nIMAGE_SIZE = 64\n\n\ndef build_train_input_fn(opts, train_data_path):\n def train_input_fn():\n ds = create_image_dataset(train_data_path)\n num_parallel = opts.map_num_parallel_calls\n ds = ds.map(\n lambda filename: parse_function(filename, opts),\n num_parallel_calls=num_parallel)\n ds = ds.prefetch(opts.prefetch_size)\n if opts.shuffle_batch:\n ds = ds.shuffle(buffer_size=opts.shuffle_size)\n ds = ds.repeat().batch(opts.batch_size)\n\n return ds\n return train_input_fn\n\n\ndef create_image_dataset(data_path):\n img_paths = read_txt_file(data_path)\n img_paths = tf.convert_to_tensor(img_paths, dtype=tf.string)\n ds = tf.data.Dataset.from_tensor_slices((img_paths))\n return ds\n\n\ndef read_txt_file(txt_file):\n \"\"\"Read the content of the text file and store it into lists.\"\"\"\n\n img_paths = []\n with open(txt_file, 'r') as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip()\n items = line.split(' ')\n img_paths.append(items[0])\n return img_paths\n\n\ndef parse_function(img_path, opts):\n image_string = tf.read_file(img_path)\n image_decoded = tf.image.decode_image(image_string, channels=None)\n image_decoded = image_decoded[:, :, :opts.nc]\n image_decoded.set_shape([None, None, opts.nc])\n image = tf.cast(image_decoded, tf.float32)\n\n img_size = opts.img_size\n smallest_side = tf.random_uniform(\n [],\n minval=img_size,\n maxval=img_size+1,\n dtype=tf.int32)\n smallest_side = tf.to_float(smallest_side)\n height, width = tf.shape(image)[0], tf.shape(image)[1]\n height = tf.to_float(height)\n width = tf.to_float(width)\n scale = tf.cond(tf.greater(height, width),\n lambda: smallest_side / width,\n lambda: smallest_side / height)\n new_height = tf.to_int32(height * scale)\n new_width = tf.to_int32(width * scale)\n resized_image = tf.image.resize_images(image, [new_height, new_width])\n crop_image = tf.random_crop(resized_image, [img_size, img_size, opts.nc])\n norm_image = crop_image / 127.5 - 1.0\n\n return {'data': norm_image}\n\n\ndef invert_norm(x):\n return (x + 1.0) * 127.5\n","sub_path":"vision/gan/wgan-gp/input_data.py","file_name":"input_data.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"127320476","text":"#!/usr/bin/python2\n# coding=utf-8\n\nimport cv2\nfrom pyzbar import pyzbar\nimport zbar\nimport numpy as np\nimport pickle\nimport matplotlib.pyplot as plt\nimport time\nimport math\nimport urllib\nimport socket\nimport signal\nimport threading\n##import LSC_Client\nimport PIL\nimport Image\nfrom LSC_Client import LSC_Client\n\nfrom test_VERSIOND_DETECT import cX\nfrom test_VERSIOND_DETECT import cY\nfrom test_VERSIOND_DETECT import MaxArea\nimport simple_barcode_detection\nimport test_VERSIOND_DETECT as detect_qr\n\nip_port_sonar = ('127.0.0.1', 9030)\nsock_sonar = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock_sonar.connect(ip_port_sonar) # 连接到超声波距离服务器以获取距离\n\nip_port_sonarx = ('127.0.0.1', 9090)\nsock_sonarx = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock_sonarx.connect(ip_port_sonarx) # 连接到超声波距离服务器以获取距离\n\ndistance = 0.0\nstep = -1\nRunning = True\n\nlsc = LSC_Client()\n\n\n##lscx = LSC_Client.LSC_Client() #摄像头\n\n### create a reader\n##scanner = zbar.ImageScanner()\n##\n### configure the 
reader\n##scanner.parse_config('enable')\n##font=cv2.FONT_HERSHEY_SIMPLEX\n##camera = cv2.VideoCapture(\"http://127.0.0.1:8080/?action=stream?dummy=param.mjpg\")\n##camera = urllib.urlopen(\"http://127.0.0.1:8080/?action=stream?dummy=param.mjpg\")\n##bytes = ''\n##pic = 0\n##start_time1 = 0\n##start_time2 = 0\n\n# 数值映射\ndef leMap(x, in_min, in_max, out_min, out_max):\n return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min\n\n\n##从服务器接收超声波距离的数据\ndef updateDistance():\n global sock_sonar\n global distance\n\n while True:\n rcv = sock_sonar.recv(1024)\n if rcv == b'':\n distance = 0.0\n break;\n else:\n if Running is True:\n st = rcv.strip() # 去除空格\n try:\n distance = float(st) # 将字符串转为浮点数\n except Exception as e:\n print(e)\n distance = 0.0\n\n\n# 启动距离更新线程\nth1 = threading.Thread(target=updateDistance)\nth1.setDaemon(True)\nth1.start()\n\n\n\n#################从服务器接收超声波距离的数据\ndef updateDistancex():\n global sock_sonar\n global DISTANCE\n\n while True:\n rcv = sock_sonarx.recv(1024)\n if rcv == b'':\n DISTANCE = 0.0\n break;\n else:\n if Running is True:\n st = rcv.strip() # 去除空格\n try:\n DISTANCE = float(st) # 将字符串转为浮点数\n except Exception as e:\n print(e)\n DISTANCE = 0.0\n\n\n# 启动距离更新线程\nth10 = threading.Thread(target=updateDistancex)\nth10.setDaemon(True)\nth10.start()\n##################################\n\n\n# 心跳,\ndef Heartbeat():\n while True:\n time.sleep(3)\n try:\n sock_sonar.sendall(\"3\")\n except:\n continue\n\n\n# 启动心跳线程\nth2 = threading.Thread(target=Heartbeat)\nth2.setDaemon(True)\nth2.start()\n\n\ndef runDetect():\n detect_qr.detect()\n\n\nth3 = threading.Thread(target=runDetect)\nth3.setDaemon(True)\nth3.start()\n\n##time_start = time.time()\n\nlsc.RunActionGroup(0, 1)\npitch = 1500\nyaw = 1500\nlsc.MoveServo(19, 2500, 1000) # 让摄像头云台的两个舵机都转动到中间位置\nlsc.MoveServo(20, 1500, 1000)\n\nwhile True:\n if Running is True:\n ## time_elapsed = time.time() - time_start\n ## if time_elapsed > 500:\n ## print(time_elapsed)\n ## Running = False\n ## break\n ## else:\n try:\n\n ####################先前动作,定位\n if step == -1:\n #寻找\n if MaxArea > 5000:\n ##面积适中,找到白色区域\n #调整蜘蛛六足\n while True:\n lsc.RunActionGroup(4, 1) #执行一次右转,然后继续判定MaxArea是否超过50,否的话停止\n lsc.WaitForFinish(3000) # 等待执行完毕\n if MaxArea<=5000:\n break\n time.sleep(0.1)\n step=0\n else :\n lsc.RunActionGroup(4, 2) # 运行4号动作在,低姿态右转动作执行16次\n lsc.WaitForFinish(5000) # 等待执行完毕\n step=-1\n #######################\n\n if step == 0:\n lsc.RunActionGroup(1, 0) # 动作组1, 低姿态前进\n step = 1 # 转到步骤1\n\n # back or not?\n elif step == 1:\n if distance > 15 and distance <= 40: # 超声波距离小于30CM\n lsc.StopActionGroup() # 停止正在执行的动作组\n step = 2 # 转到步骤2\n elif distance <= 15 and distance > 0: # back and stay step1\n lsc.StopActionGroup() # 停止正在执行的动作组\n time.sleep(0.1)\n lsc.RunActionGroup(2, 1) # 小于15就后退,去远离目标\n lsc.WaitForFinish(3000) # 等待执行完毕\n step = 1 # 转到步骤1\n elif step == 2:\n lsc.RunActionGroup(4, 11) # 运行4号动作在,低姿态右转动作执行16次\n lsc.WaitForFinish(20000) # 等待执行完毕\n step = 3 # 转到步骤3\n elif step == 3:\n step = 0 # 回到步骤0\n else:\n pass\n time.sleep(0.1)\n except Exception as e:\n print(e)\n break\n else: # Running 是False, 程序被暂停,什么都不做\n time.sleep(0.1)\n\n","sub_path":"test_VERSION.py","file_name":"test_VERSION.py","file_ext":"py","file_size_in_byte":5821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"623094663","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 23 14:23:18 2018\n\n@author: Zhe Li\n\"\"\"\n\nimport scipy\nimport numpy as np\nimport torch\nfrom torch.utils.data 
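The robot script above shares the module-global `distance` between the socket-reader thread and the control loop with no synchronization; a single float assignment is effectively atomic under CPython's GIL, but a small lock-guarded holder is the safer idiom and also gives the control loop a way to detect stale readings. A hedged sketch (names are illustrative, not from the original):

```python
import threading
import time

class SensorValue:
    """Thread-safe holder for the latest reading from a background thread."""
    def __init__(self):
        self._lock = threading.Lock()
        self._value = 0.0
        self._stamp = 0.0

    def update(self, value):
        with self._lock:
            self._value = value
            self._stamp = time.time()

    def read(self, max_age=1.0):
        with self._lock:
            fresh = (time.time() - self._stamp) <= max_age
            return self._value, fresh

distance = SensorValue()

def reader():                          # stands in for the sock.recv() loop
    for v in (12.0, 18.5, 41.0):
        distance.update(v)

t = threading.Thread(target=reader, daemon=True)
t.start()
t.join()
print(distance.read())                 # -> (41.0, True)
```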
import Dataset\n\ndef vonmisespdf(a, b, resol=100):\n theta = np.linspace(0, 2*np.pi, num=resol, endpoint=False)\n kappa = (a**2+b**2)**0.5\n p = np.exp(a[:, :, None]*np.cos(theta)+b[:, :, None]*np.sin(theta))/scipy.special.iv(0, kappa[:, :, None])\n return p/resol\n\ndef vonmisesKL(p_a, p_b, q_a, q_b):\n p = vonmisespdf(p_a, p_b)\n q = vonmisespdf(q_a, q_b)\n D = np.sum(p*np.log(p/q), axis=-1)\n return D\n\nclass GNN_Dataset(Dataset):\n # numpy arrays\n # inputs shape: sample_num*T*N*Di\n # targets shape: sample_num*T*N*Dr\n def __init__(self, inputs, targets, train_start, test_start, J_true):\n super(GNN_Dataset, self).__init__()\n sample_num, T, N, Di = inputs.shape\n assert targets.shape[0]==sample_num, 'sample number inconsistent'\n assert targets.shape[1]==T, 'sequence duration inconsistent'\n assert targets.shape[2]==N, 'unit number inconsistent'\n assert train_start T*batch_size*N*Di\n# targets shape: batch_size*T*N*Do -> T*batch_size*N*Do\ndef transformed_data(inputs, targets,\n device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')):\n inputs = inputs.permute(1, 0, 2, 3).contiguous().to(device, torch.float)\n targets = targets.permute(1, 0, 2, 3).contiguous().to(device, torch.float)\n return inputs, targets\n","sub_path":"python/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"93775915","text":"#!/usr/bin/env python3\n\"\"\"\nxmlslurper reads in an xml formatted file and converts it to a python dictionary\n\"\"\"\nimport xml.parsers.expat\n\nclass Xmlslurper:\n \"\"\"\n Class to read in an xml formatted file and convert the table contents to a python dictionary\n\n takes the filename and the expected table names as inputs\n \"\"\"\n def __init__(self, filename, tablenames):\n self.data = {}\n\n\n ##################################################################\n def start_element(name, attrs, data=self.data):\n \"\"\" Overrides the xml parser start element routine, specifically looks\n for table, field, tr, and td tags, ingoring all others\n\n Parameters\n ----------\n name : str\n The name of the tag\n attrs : dict\n Any tag attributes\n data : dict\n The current data structure\n\n \"\"\"\n if name.upper() == 'TABLE':\n # skip if not one of the desired tables\n if not attrs['name'] in self.data['wanted_tables']:\n return\n\n # initialize values for a new table\n data['curtable'] = attrs['name']\n data['fieldnames'] = []\n data['fieldtypes'] = []\n data['fieldarray'] = []\n data['tables'][data['curtable']] = []\n\n if name.upper() == 'FIELD' and self.data['curtable']:\n # save description information\n data['fieldnames'].append(attrs['name'].lower())\n data['fieldtypes'].append(attrs['datatype'])\n data['fieldarray'].append(attrs.get('arraysize', None))\n\n if name.upper() == 'TR':\n # new row, inialize row values\n data['col'] = 0 # current column\n data['prevcol'] = 0 # previous column\n data['prevtext'] = '' # previous text parsed in case partial due to buffer\n data['currow'] = {} # dictionary to store info from row\n\n if name.upper() == 'TD':\n # save state that are in a TD section\n data['in_TD'] = True\n\n\n ##################################################################\n def end_element(name, data=self.data):\n \"\"\" Overrides the xml parser end element routine, specifically looks\n for table, tr, and td tags, ingoring all others\n\n Parameters\n ----------\n name : str\n The name of the tag\n data : dict\n The current data 
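A note on `vonmisespdf` above: `exp(a*cos(t) + b*sin(t))` equals `exp(kappa*cos(t - mu))` with `a = kappa*cos(mu)` and `b = kappa*sin(mu)`, and dividing by `iv(0, kappa)` and then by `resol` turns the grid of density values into masses that sum to approximately 1 — so `vonmisesKL` is a discrete approximation of the continuous KL divergence (the grid spacing cancels inside the log). A hedged scalar check:

```python
import numpy as np
from scipy.special import iv

def vm_masses(a, b, resol=100):
    # Scalar version of vonmisespdf() above: grid masses, not densities.
    t = np.linspace(0, 2 * np.pi, num=resol, endpoint=False)
    kappa = np.hypot(a, b)
    return np.exp(a * np.cos(t) + b * np.sin(t)) / iv(0, kappa) / resol

p = vm_masses(2.0, 0.0)   # kappa = 2, mean direction 0
q = vm_masses(0.0, 2.0)   # kappa = 2, mean direction pi/2
print(p.sum())                       # ~1.0, so the grid is a valid pmf
print(np.sum(p * np.log(p / q)))     # discrete KL(p || q), positive
```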
structure\n \"\"\"\n if name.upper() == 'TD':\n # if closed TD section, change TD state\n data['col'] += 1\n data['in_TD'] = False\n\n if name.upper() == 'TR' and data['curtable']:\n # save current row dictionary to current table\n data['tables'][data['curtable']].append(data['currow'])\n\n if name.upper() == 'TABLE' and data['curtable']:\n # empty table variables\n data['curtable'] = None\n del self.data['fieldnames']\n del self.data['fieldtypes']\n del self.data['fieldarray']\n\n\n ##################################################################\n def char_data(text, data=self.data):\n \"\"\" Overrides the xml parser char_data routine. It converts the\n contents of tags into their expected data type and format\n\n Parameters\n ----------\n test : str\n The contents of the current tag\n data : dict\n The current data structure\n\n \"\"\"\n prevtext = text\n\n if data['in_TD'] and self.data['curtable']:\n # if still same column, need to join with previous data\n if data['prevcol'] == data['col']:\n text = data['prevtext'] + text\n\n curarrsize = data['fieldarray'][data['col']]\n curtype = data['fieldtypes'][data['col']]\n curname = data['fieldnames'][data['col']]\n\n if curarrsize is not None and curtype != 'char':\n # data is for an array field\n # assumes array cannot be of strings\n\n # so split into separate values\n vals = text.strip().split()\n\n # convert values to right type\n if curtype == 'int':\n for i, val in enumerate(vals):\n vals[i] = int(val)\n elif curtype == 'float':\n for i, val in enumerate(vals):\n vals[i] = float(val)\n\n # save data array to current row data\n data['currow'][curname] = vals\n else:\n # single value, convert to right type\n if curtype == 'int':\n val = int(text)\n elif curtype == 'float':\n val = float(text)\n else:\n val = text\n\n # save data array to current row data\n data['currow'][curname] = val\n\n # save state for next char_data call\n data['prevtext'] = prevtext\n data['prevcol'] = data['col']\n\n\n ##################################################################\n # actual code for __init__\n\n # initialize values\n # which tables we want values from\n self.data['wanted_tables'] = tablenames\n\n # name of table currently parsing\n self.data['curtable'] = None\n self.data['params'] = {}\n\n # dictionary of tables\n # tables are arrays of row dict\n self.data['tables'] = {}\n self.data['in_TD'] = False # whether in TD section or not\n\n p = xml.parsers.expat.ParserCreate()\n #p.buffer_size=32768\n p.buffer_size = 2048\n\n # assign functions to handler\n p.StartElementHandler = start_element\n p.EndElementHandler = end_element\n p.CharacterDataHandler = char_data\n\n fl = open(filename, \"rb\")\n p.ParseFile(fl)\n fl.close()\n\n #\n # clean out our bookkeeping\n #\n del self.data['curtable']\n del self.data['wanted_tables']\n del self.data['in_TD']\n\n\n ##################################################################\n def gettables(self):\n \"\"\" Method to get the full contents of the data table structure\n\n Returns\n -------\n Dict containing the current table data\n\n \"\"\"\n return self.data['tables']\n\n #\n # we look like our data member...\n #\n def __getattr__(self, blah):\n return getattr(self.data['tables'], blah)\n\n\n\n\nif __name__ == \"__main__\": # pragma no coverage\n tablelist = (\"Astrometric_Instruments\",\n \"FGroups\",\n \"Fields\",\n \"Photometric_Instruments\",\n \"PSF_Extensions\",\n \"PSF_Fields\",\n \"Warnings\")\n import sys\n import glob\n import pprint\n pp = pprint.PrettyPrinter(indent=4)\n if 
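The `prevtext`/`prevcol` bookkeeping in `char_data` above exists because expat may deliver a single text node as several `CharacterDataHandler` calls — the class even shrinks `buffer_size` to 2048, which makes such splits more likely. A hedged minimal demonstration of the accumulate-then-flush pattern the slurper relies on:

```python
import xml.parsers.expat

chunks, cells = [], []

def start(name, attrs):
    if name == "td":
        del chunks[:]                   # new cell: reset the accumulator

def end(name):
    if name == "td":
        cells.append("".join(chunks))   # flush once the element closes

def chars(text):
    chunks.append(text)                 # may fire several times per node

p = xml.parsers.expat.ParserCreate()
p.StartElementHandler = start
p.EndElementHandler = end
p.CharacterDataHandler = chars
p.Parse("<tr><td>12 34</td><td>5.0</td></tr>", True)
print(cells)                            # -> ['12 34', '5.0']
```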
len(sys.argv) > 1:\n pp.pprint(Xmlslurper(sys.argv[1], tablelist).gettables())\n else:\n for f in glob.glob('*.xml'):\n print(\"f: \", f)\n pp.pprint(Xmlslurper(f, tablelist).gettables())\n","sub_path":"python/despymisc/xmlslurp.py","file_name":"xmlslurp.py","file_ext":"py","file_size_in_byte":7690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"14051635","text":"#!/usr/bin/env python3\n\nimport itertools\nfrom src.day5 import part2\n\n# Amp: A, B, C, D, E\n# phase setting: 0, 1, 2, 3 4 (not sure which amp gets which, but each use one)\n# input: 0 (for amp A), amp B takes amp A's output as input etc.\n# each amp runs the test input fresh\n\n\ndef get_phase_setting_combinations(range_from, range_to):\n # e.g. for 0, 1, 2 = 0,1,2 ; 1,0,2 ; 2,0.1\n number_range = range(range_from, range_to)\n return list(itertools.permutations(number_range, len(number_range)))\n\n\ndef amplification(input):\n number_of_amps = 5\n phase_settings = get_phase_setting_combinations(0, number_of_amps)\n max_result = 0\n for setting in phase_settings:\n user_input = 0\n result = 0\n for amp in range(0, number_of_amps):\n result = part2.diagnostic_program(input, [setting[amp], user_input])\n user_input = result\n if result > max_result:\n max_result = result\n return max_result\n\n\nif __name__ == '__main__':\n with open('input', 'r') as file:\n input = file.readlines()[0]\n print('Result: ' + str(amplification(list(map(int, input.split(\",\"))))))\n","sub_path":"src/day7/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"8172187","text":"from operator import itemgetter, attrgetter\r\n\r\n\r\ndef tupleSeparator(string):\r\n # Split the string into a list with a delimiter of -\r\n # Each item in the list will have the name, age, and score all within a single string\r\n stringList = string.split(\"-\")\r\n # Initialize a tupleList\r\n # We will be dividing each string within the list into tuples\r\n tupleList = []\r\n\r\n # for the length of the stringList\r\n for i in range(len(stringList)):\r\n # make a temporary list to split each string within the stringList\r\n tempList = stringList[i].split(\",\")\r\n # make the string into a tuple\r\n tupleInfo = (tempList[0], tempList[1], tempList[2])\r\n # attach the tuple made into the list of tuples\r\n tupleList.append(tupleInfo)\r\n\r\n # when finished return the tupleList, sorted by name, age, then score\r\n return sorted(tupleList, key=itemgetter(0, 1, 2))\r\n","sub_path":"ITP 499 Assignments/ITP499_a2_Kim_Minjoo/ITP499 Asn #2-3.py","file_name":"ITP499 Asn #2-3.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"124316496","text":"import pandas as pd\n\n#Create Test Data Sets\n# df1 = pd.DataFrame({'StudentID': ['A1','A2','A7'],\n# \t\t\t\t\t'B': ['B1','B2','B3'],\n# \t\t\t\t\t'C': ['C1','C2','C3'],\n# \t\t\t\t\t})\n\n# df2 = pd.DataFrame({'B': ['B3','B4','B5','B0','B6'],\n# \t\t\t\t\t'StudentID': ['A1','A2','A2','A5','A7'],\n# \t\t\t\t\t'C': ['C1','C3','C4','C0','C5'],\n# \t\t\t\t\t})\n\n\n\n\ndef merge_data(df1,df2,identifier_string):\n\t#Merge Two Data Sets to Indicate which individuals show up in the new data set\n\tdfMerge = df1.merge(df2,how='left',on=identifier_string,indicator=True)\n\n\t#Reshape this merged file to include all individuals who have an observation in the new data set 
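`get_phase_setting_combinations(0, 5)` in the day-7 record is simply `itertools.permutations(range(5))`: all 5! orderings of the phase settings, with the outer loop keeping the best final signal. A hedged skeleton of that search with a stand-in amplifier function:

```python
import itertools

def run_amp(phase, signal):      # stand-in for the intcode program
    return signal * 2 + phase

best = 0
for setting in itertools.permutations(range(5)):
    signal = 0
    for phase in setting:
        signal = run_amp(phase, signal)
    best = max(best, signal)
print(best)                      # -> 98, from the ordering (4, 3, 2, 1, 0)
```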
\n\tnewData1 = dfMerge[dfMerge.columns[0:(len(df1.columns))]]\n\tnewData2 = pd.concat([dfMerge[identifier_string],dfMerge[dfMerge.columns[(len(df1.columns)):len(dfMerge.columns)-1]]],axis=1)\n\n\t#Put Student ID on first\n\tcols = newData1.columns.tolist()\n\tcols = [cols[-1]]+cols[:-1] # or whatever change you need\n\tnewData1 = newData1.reindex(columns=cols)\n\n\tnewData1.columns = range(newData1.shape[1])\n\tnewData2.columns = range(newData2.shape[1])\n\n\tnewData3 = newData1.append(newData2)\n\n\t#Gives all individuals who have an observation in the new data set\n\tfinalNewEntries = newData3.drop_duplicates()\n\n\t#Gives old + new data\n\tfullData = df1.append(df2)\n\tcols = fullData.columns.tolist()\n\tcols = [cols[-1]]+cols[:-1] # or whatever change you need\n\tfullData = fullData.reindex(columns=cols)\n\n\tfinalNewEntries.columns = fullData.columns\n\n\t#Gives all individuals in the old data which don't have an observation in the new data\n\tfinalOldEntries = pd.concat([fullData,finalNewEntries]).drop_duplicates(keep=False)\n\tfinalOldEntries.columns = fullData.columns\n\n\t#Generates variable 'newflag'\n\tfinalOldEntries['newflag'] = 0\n\tfinalNewEntries['newflag'] = 1\n\n\t#Generates final data with 'newflag'\n\tFinalData = pd.concat([finalNewEntries,finalOldEntries])\n\n\t#Prints out data\n\treturn(FinalData)\n\ndf1 = pd.read_csv('/users/mwolff/desktop/Lipscomb.csv').dropna(how='all')\ndf2 = pd.read_csv('/users/mwolff/desktop/Lipscomb_old.csv').dropna(how='all')\n\n\ndata = merge_data(df1,df2,'StudentID')\n\nprint(data)\n\n\n\n\n","sub_path":"lipscomb/merge_test.py","file_name":"merge_test.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"198497135","text":"pharase = \"Don't panic!\"\nplist = list(pharase)\nprint(pharase)\nprint(plist)\nfor nums in range (5):\n plist.pop()\nplist.remove(' ')\nplist.pop(0)\nplist.pop(2)\nplist.insert(2, ' ')\nplist.insert(4, 'a')\nnew_pharas = ''.join(plist)\nprint(plist)\nprint(new_pharas)\n","sub_path":"HeadFirst/panic.py","file_name":"panic.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"107308379","text":"#!/usr/bin/env python\n\n'''\nAuthor: Li Yinkai\nEmail: yinkai.li@foxmail.com\nBlog: https://blog.51cto.com/yinkai\n\nDate: 2019/7/21 上午11:14\n'''\n\nimport datetime\nimport time\n\ndef logger(fn):\n def wrapper(*args, **kwargs):\n # print(\"Call function: {}. \\nThe args: x = {}, y = {}\\n\".format(fn.__name__, *args))\n # before 功能增强\n start = datetime.datetime.now()\n ret = fn(*args, **kwargs)\n # after 功能增强\n duration = (datetime.datetime.now() - start).total_seconds()\n print(\"Function {} took {}s.\".format(fn.__name__, duration))\n if duration > 5:\n print(\"So slow! \")\n else:\n print(\"So fast! 
\")\n print(\"=\" * 50)\n return ret\n return wrapper\n\n@logger # 装饰器语法,把装饰器下边的标识符提上来,作为该装饰器函数的参数,并且将返回值重新覆盖给该标识符\ndef sub(x, y):\n time.sleep(6)\n return x - y\n\n@logger # @logger 等价于: add = logger(add) => wrapper(x, y)\ndef add(x, y):\n time.sleep(2)\n return x + y\n\n\nret1 = add(5,4) # => add = logger(add)(5, 4) => wrapper(5, 4)\nret2 = sub(5,4)\nprint('Result = {}'.format(ret1))\nprint('Result = {}'.format(ret2))\n","sub_path":"Decorator/decorator-3.py","file_name":"decorator-3.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"379981102","text":"import argparse\nimport csv\nimport logging\nimport os\nimport sys\nfrom pathlib import Path\n\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm.session import Session\n\nfrom word_way.app import create_app\nfrom word_way.scrapping.word import save_word\nfrom word_way.context import create_session\nfrom word_way.models import SynonymsWordRelation\n\nparser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n)\nparser.add_argument('-c', '--config', type=str,\n default=str(os.environ.get('WORD_WAY_ENV', 'prod')))\nparser.add_argument(\n '-l', '--line', type=int, default=0,\n help='Options to determine which line synonyms.tsv should be read from'\n)\n\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n args = parser.parse_args()\n wsgi_app = create_app(args.config)\n with wsgi_app.app_context():\n config = wsgi_app.config['APP_CONFIG']\n session = create_session(config)\n scrap_synonyms(session, args.line)\n\n\ndef scrap_synonyms(session: Session, start_line: int):\n \"\"\"synonyms.tsv 파일을 읽어 유의어 및 단어 정보를 저장하는 함수\n\n :param session: 사용할 세션\n :type session: :class:`sqlalchemy.orm.session.Session`\n :param start_line: 파일 읽기 시작할 라인\n :type start_line: :class:`int`\n\n \"\"\"\n log = logger.getChild('scrap_synonyms')\n synset_list_dir = Path('synonyms.tsv').resolve()\n\n with open(synset_list_dir, newline='') as f:\n rows = csv.reader(f, delimiter='\\t')\n for row_num, row in enumerate(rows):\n if row_num < start_line:\n continue\n lemmas_list = [l.strip() for l in row[3].split(',')]\n log.info(f'Start saving the row {row_num} ({lemmas_list})')\n pronunciation_ids = []\n for lemmas in lemmas_list:\n log.info(f'Start saving the words ({lemmas})')\n pronunciation_id = save_word(lemmas, session)\n if pronunciation_id:\n pronunciation_ids.append(pronunciation_id)\n log.info(f'Done saving the words ({lemmas})')\n pronunciation_ids = list(set(pronunciation_ids))\n if len(pronunciation_ids) < 2:\n continue\n for i in range(len(pronunciation_ids)):\n synonyms_ids = pronunciation_ids[:]\n criteria_id = synonyms_ids.pop(i)\n log.info(f'Start saving the synonyms ({criteria_id})')\n for synonyms_id in synonyms_ids:\n relation = SynonymsWordRelation(\n criteria_id=criteria_id, relation_id=synonyms_id,\n )\n session.add(relation)\n try:\n session.flush()\n log.info(f'Done saving the relationship'\n f' ({relation.__dict__})')\n except IntegrityError:\n session.rollback()\n log.warning(f'The relationship ({relation.__dict__})'\n ' already exists')\n log.info(f'Done saving the synonyms ({criteria_id})')\n session.commit()\n log.info(f'Done saving the row {row_num} ({lemmas_list})')\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"init_word.py","file_name":"init_word.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"595983754","text":"from flask import Flask, request, Response,json\nfrom flask_restful import Resource, Api\nfrom json import dumps\nfrom flask import jsonify\nfrom werkzeug.utils import secure_filename\nfrom requests_toolbelt import MultipartEncoder\nimport os\napp = Flask(__name__, static_url_path='/static')\napi = Api(app)\nclass Image(Resource):\n def post(self):\n image = request.files.to_dict() #convert multidict to dict\n print(image.keys())\n file_name = image[\"Input\"].filename\n\n if not os.path.isdir(\"images\"):\n os.mkdir(\"images\")\n\n image[\"Input\"].save(\"images/\"+file_name)\n \n with open('static/result.txt') as text:\n res = text.readline().split()\n \n fields=({'Name': res[0],\n 'Data':'static/homer.jpg'})\n response = app.response_class(\n response=json.dumps(fields),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\n\n\n\n \n \napi.add_resource(Image, '/')\n\n\nif __name__ == '__main__':\n app.run(port='5002')\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"358031207","text":"import re\n\n\ndef clean(src, str_list):\n for str in str_list:\n src = src.replace(str, '')\n return src\n\n# search all manga chapter\nwith open('manga_item.html', encoding='utf-8') as f:\n doc = f.read()\n\n pattern = re.compile(r'
', re.DOTALL)\n res = re.search(pattern, doc)\n\n start = int(res.span()[0])\n end = int(res.span()[1])\n doc = doc[start:end]\n\n res = re.findall(r'', doc)\n\n links = []\n for item in res:\n pattern = re.compile(r'href=\"(.+)\"\\s')\n res = re.search(pattern, item)\n print(res.groups()[0])\n print(res.group())\n if res is not None:\n pos = res.span()\n item = item[pos[0]:pos[1]]\n item = clean(item, ['href=\"', '\"'])\n links.append(item)\n # links.append(item[pos[0]:pos[1]].replace('href=\"', '').replace('\"',''))\n\n for link in links:\n print(link)\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Regex/manga_regex.py","file_name":"manga_regex.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"327582795","text":"__author__ = 'Noriyuki_Fujimura'\n\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\nimgrgb=cv2.imread('data/smb3_lost03.jpg')\nimggray=cv2.cvtColor(imgrgb,cv2.COLOR_BGR2GRAY)\ntemplate=cv2.imread('data/mario_coin.jpg',0)\nw,h=template.shape[::-1]\n\nres=cv2.matchTemplate(imggray,template,cv2.TM_CCOEFF_NORMED)\nthreshold=0.8\n\nloc=np.where(res>=threshold)\nfor pt in zip(*loc[::-1]):\n cv2.rectangle(imgrgb,pt,(pt[0]+w,pt[1]+h),(0,0,255),2)\n\ncv2.imwrite('data/templatematchingresult_smb3_lost03.png',imgrgb)\n","sub_path":"templatematchingmultiple.py","file_name":"templatematchingmultiple.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"377136396","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\n\n# from mat2cuboid_annot import mat2cuboid_annot\n# from get_2d_bbox import get_bbox_abs, get_bbox_xywh\nimport os\nimport cv2\nimport json\nimport numpy as np\nfrom detectron2.structures import BoxMode\nfrom annot_processor import merge_annot_files, remove_faulty, check_visibilty\n\n\ndef convert2detectron_format(annot_list, images_dir):\n '''\n inputs: annotations list to be converted\n path to 'images' folder in the dataset - used for retreiving fileName\n ouput: standard detectron 2 dataset format (json_like)\n It is required that all images are in an 'images' folder in the same \n directory as the annotation file\n \n ''' \n # with open(annot_file) as f:\n # annot_list = json.load(f)\n \n dataset_list = []\n for annot in annot_list: #per image loop\n record = {} \n entry = annot\n \n image_id = entry['fileName'].split('/')[-1]\n image_id = image_id.split('\\\\')[-1]\n image_id = image_id.split('.')[-2] #use last section of image directory as image id\n record['image_id'] = image_id\n # print(image_id)\n \n # images_dir = annot_file.split('/')[:-1]\n # img_path = os.path.join('/', *images_dir, 'images', image_id + '.jpg') #construct path to images folder. * used to force accepting lists\n # images_dir = images_dir.split('/')\n img_path = os.path.join(images_dir, image_id + '.jpg') #construct path to images folder. * used to force accepting lists\n record['file_name'] = img_path\n \n height, width = cv2.imread(img_path).shape[:2]\n record['height'] = height\n record['width'] = width\n\n annotations = []\n for i in range(len(entry['squares'])): #per object instance loop\n instance = {}\n bbox = entry['squares'][i] \n if bbox: #non-empty annotations\n instance['category_id'] = 0 #foreground (cuboid)\n else:\n instance['category_id'] = 1 #background \n #ToDo: leave category empty for background? 
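In the multi-template-matching record, `np.where(res >= threshold)` usually returns a cluster of near-duplicate hits around each true match, so every coin gets drawn several times. A hedged greedy suppression sketch that keeps one point per cluster (a simple stand-in for proper non-maximum suppression):

```python
def dedupe_matches(points, min_dist=10):
    """Keep points at least min_dist apart (in x or y) from all kept ones;
    `points` would come from zip(*np.where(res >= threshold)[::-1])."""
    kept = []
    for pt in points:
        if all(abs(pt[0] - k[0]) > min_dist or abs(pt[1] - k[1]) > min_dist
               for k in kept):
            kept.append(pt)
    return kept

hits = [(100, 50), (101, 50), (100, 51), (200, 80)]
print(dedupe_matches(hits))      # -> [(100, 50), (200, 80)]
```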
\n try:\n bbox_abs = get_bbox_abs_coor(bbox, height, width)\n bbox_xywh = get_bbox_xywh(bbox_abs)\n except IndexError:\n print('2D BBox could not be retreived!')\n return\n \n cuboid = entry['cubes'][i]\n if cuboid: #non-empty\n cuboid_abs = get_cuboid_abs_coor(cuboid, height, width)\n # cuboid_interleaved = interleave_visibilty(cuboid_abs)\n \n instance['bbox'] = bbox_xywh\n instance['bbox_mode'] = BoxMode.XYWH_ABS \n # instance['keypoints'] = cuboid_interleaved\n instance['keypoints'] = cuboid_abs\n annotations.append(instance)\n record['annotations'] = annotations \n dataset_list.append(record)\n \n # with open(\"test.json\", \"w\") as write_file:\n # json.dump(dataset_list, write_file, indent=4, sort_keys=True) \n \n return dataset_list\n\ndef get_bbox_abs_coor(coordinates, height, width):\n '''\n converts coordinates of the corners of a single cuboid or bbox to absolute pixel positions\n '''\n coors_abs = [] #all converted box coordinates\n for coors in coordinates: #convert each x and y coordinate\n conv_coors = [] #single converted x,y coordinates\n coor_abs_x = coors[0]*width\n conv_coors.append(coor_abs_x)\n coor_abs_y = coors[1]*height\n conv_coors.append(coor_abs_y) \n coors_abs.append(conv_coors)\n return coors_abs\n\ndef get_cuboid_abs_coor(coordinates, height, width):\n '''\n converts coordinates of the corners of a single cuboid or bbox to absolute pixel positions.\n Also, converts a list[list] containing x,y corrdinates of the corners of the cuboid into a single list\n '''\n # coors_abs = [] #all converted box coordinates\n cuboid_mod = []\n for coors in coordinates: #convert each x and y coordinate\n conv_coors = [] #single converted x,y coordinates\n coor_abs_x = coors[0]*width\n conv_coors.append(coor_abs_x)\n coor_abs_y = coors[1]*height\n conv_coors.append(coor_abs_y)\n conv_coors.append(coors[2]) #adds visibility\n # coors_abs.append(conv_coors)\n cuboid_mod.extend(conv_coors)\n return cuboid_mod\n\ndef get_bbox_xywh (bbox_abs): #(x,y) is that of the top-left corner\n '''\n returns a single list containing the xywh bbox conversion\n ''' \n x_min = bbox_abs[0][0] #hanndle 1 box only\n x_max = bbox_abs[2][0]\n y_min = bbox_abs[0][1]\n y_max = bbox_abs[2][1] \n w = x_max - x_min\n h = y_max - y_min\n bbox_xywh = [x_min, y_min, w, h] #converted bbox\n return bbox_xywh\n\n# def interleave_visibilty(cuboid, dummy):\n# '''\n# Input: - a list[list] containing x,y corrdinates of the corners of the cuboid\n# - a flag for interleaving with dummy visibility values. 
Set to False to use actual values in annotation\n# Output: a single list of x,y corner coordinates of the cuboid interleaved with visibilty value\n# '''\n# v = 2 #visibility\n# cuboid_mod = []\n# # cuboid_mod.extend(cuboid_coors)\n# # cuboid_mod.append(v)\n# for i in range(len(cuboid)):\n# cuboid_mod.extend(cuboid[i])\n# cuboid_mod.append(v)\n \n# return cuboid_mod \n \n\nif __name__ == '__main__':\n annot_file_dir = '/home/porthos/masters_thesis/datasets/augmented_dataset/annotations_hazem.json'\n annot_files_dir_list = [annot_file_dir]\n annot_files_merged = merge_annot_files(annot_files_dir_list)\n annot_file_corrected = remove_faulty(annot_files_merged)\n check_visibilty(annot_file_corrected)\n images_dir = '/home/porthos/masters_thesis/datasets/augmented_dataset/images'\n dataset_list = convert2detectron_format(annot_file_corrected, images_dir)\n \n \n \n \n \n \n \n ","sub_path":"format_converter.py","file_name":"format_converter.py","file_ext":"py","file_size_in_byte":6025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"25203002","text":"import numpy\ndef drawSkeleton(input, hms, coords):\n im = input\n\n pairRef = (\n (1,2), (2,3), (3,7),\n (4,5), (4,7), (5,6),\n (7,9), (9,10),\n (14,9), (11,12), (12,13),\n (13,9), (14,15), (15,16)\n )\n\n partNames = ('RAnk','RKne','RHip','LHip','LKne','LAnk',\n 'Pelv','Thrx','Neck','Head',\n 'RWri','RElb','RSho','LSho','LElb','LWri')\n partColor = (1,1,1,2,2,2,0,0,0,0,3,3,3,4,4,4)\n\n actThresh = 0.002\n\n # Loop through adjacent joint pairings\n for pair in pairRef:\n if np.mean(hms[pair[0]]) > actThresh and np.mean(hms[pair[1]]) > actThresh:\n # Set appropriate line color\n if partColor[pair[0]] == 1:\n color = (0,85,255)\n elif partColor[pair[0]] == 2 : color = (255,85,0)\n elif partColor[pair[0]] == 3 : color = (0,0,255)\n elif partColor[pair[0]] == 4 : color = (255,0,0)\n else: color = (180,0,180)\n\n # Draw line\n im = drawLine(im, coords[pair[0]], coords[pair[1]], 4, color, 0)\n \n return im\n","sub_path":"python/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"259839873","text":"from flask import *\nfrom pymongo import MongoClient\nfrom bson.objectid import ObjectId\n\nimport config\n\napp = Flask(__name__)\napp.secret_key = '12345'\n\nclient = MongoClient(config.mongo_ip, config.mongo_port)\ndb = client.testdb\n\n@app.route('/')\ndef leaderboard():\n \"\"\"\n Renders The Main Leaderboard\n \"\"\"\n\n _scores = db.scores.find().sort(\"score\", -1)\n scores = [score for score in _scores]\n\n return render_template('leaderboard.html', scores=scores)\n\n\n@app.route('/new', methods=['POST'])\ndef new_score():\n \"\"\"\n Posts a new score to the leaderboard\n \"\"\"\n\n score_doc = {\n 'username': request.form['username'],\n 'score': int(request.form['score'])\n }\n res = db.scores.replace_one(score_doc, score_doc, True);\n\n if not res.upserted_id:\n flash('Username/Score Combination Already Exists')\n\n return redirect(url_for('leaderboard'))\n\n\n@app.route('/delete', methods=['POST'])\ndef delete_score():\n \"\"\"\n Deletes a score\n \"\"\"\n\n score_doc = {\n '_id': ObjectId(request.form['score_id'])\n }\n result = db.scores.delete_one(score_doc)\n\n return redirect(url_for('leaderboard'))\n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", 
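`get_bbox_abs_coor` plus `get_bbox_xywh` in `format_converter.py` assume corners arrive normalized to [0, 1] with index 0 the top-left and index 2 the bottom-right corner. A small worked example of the two-step conversion into detectron2's `BoxMode.XYWH_ABS` layout (a condensed sketch, not the original functions):

```python
def corners_to_xywh(corners, height, width):
    # corners: normalized [[x, y], ...]; only the top-left (index 0) and
    # bottom-right (index 2) corners matter for the XYWH box.
    (x0, y0), (x1, y1) = corners[0], corners[2]
    x_min, y_min = x0 * width, y0 * height
    x_max, y_max = x1 * width, y1 * height
    return [x_min, y_min, x_max - x_min, y_max - y_min]

# A 640x480 image with a box spanning 25%..75% of each axis:
print(corners_to_xywh([[0.25, 0.25], None, [0.75, 0.75]], 480, 640))
# -> [160.0, 120.0, 320.0, 240.0]
```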
debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"487027783","text":"# 2. Implement the fill contour using morphological operations algorithm presented during lecture 5.\nimport numpy as np\nimport cv2 as cv\nimport time\n\nraw_image = cv.imread(cv.samples.findFile(\"..\\\\resources\\\\contour_fill_input_image.png\"))\n\n# now, apply the Flood Fill algorithm:\n\n\"\"\"\nGet the greatest value from the sub-matrix defined by the kernel and the source image\nparam source_image: a numpy matrix representing the image on which the kernel must be applied\nparam kernel: a numpy matrix representing the kernel that must be applied on the source image;\n the kernel must be a square matrix and the side must be odd.\n also, the kernel must only contain 1 and 0; 1 values are placed on the interesting positions;\nparam center_x: the abscissa of the point on which the anchor of the kernel must be superposed\nparam center_y: the ordinate of the point on which the anchor of the kernel must be superposed\nparam not_indexes: \n\"\"\"\ndef apply_dilation_kernel(source_image, kernel, center_x, center_y, not_indexes):\n image_copy = np.array(source_image)\n lim_x_1, lim_x_2 = center_x - int(kernel.shape[1] / 2), center_x + int(kernel.shape[1] / 2)\n lim_y_1, lim_y_2 = center_y - int(kernel.shape[0] / 2), center_y + int(kernel.shape[0] / 2)\n source_interesting_portion = image_copy[lim_y_1:lim_y_2+1, lim_x_1:lim_x_2+1]\n source_interesting_portion[tuple(np.array(not_indexes).T)] = 255\n return source_interesting_portion.min()\n\n\"\"\"\nIntersection of two different images with the same shape. Both images must be binary.\nparam image_1: numpy matrix\nparam image_2: numpy matrix\nparam val_1: the value that must be considered 1 (i.e. 
the value that must be in both images\n at the same pixel in order to be preserved)\nparam val_2: the background value\n\"\"\"\ndef intersect(image_1, image_2, val_1, val_2):\n indexes_1_y, indexes_1_x = np.where(image_1 == val_1)\n indexes_2_y, indexes_2_x = np.where(image_2 == val_1)\n indexes_1 = np.array(list(zip(indexes_1_y, indexes_1_x)))\n indexes_2 = np.array(list(zip(indexes_2_y, indexes_2_x)))\n binary_1 = np.zeros(image_1.shape)\n binary_1[tuple(indexes_1.T)] = 1\n binary_2 = np.zeros(image_2.shape)\n binary_2[tuple(indexes_2.T)] = 1\n intersection = np.logical_and(binary_1, binary_2)\n indexes_3_y, indexes_3_x = np.where(intersection == True)\n indexes_3 = list(zip(indexes_3_y, indexes_3_x))\n intersection = np.ones(image_1.shape) * val_2\n # intersection[[*np.array(indexes_3).T]] = val_1 # deprecated\n intersection[tuple(np.array(indexes_3).T)] = val_1\n return intersection\n\n\n\"\"\"\nProvide the result of applying a dilation with the provided kernel on the provided numpy matrix\nparam image_matrix: a numpy matrix representing a binary image\nparam kernel: a numpy matrix representing the dilation kernel\n\"\"\"\ndef dilate(image_matrix, kernel):\n kernel_side = kernel.shape[0]\n result = np.array(image_matrix)\n x_min, x_max = 0, image_matrix.shape[1]\n y_min, y_max = 0, image_matrix.shape[0]\n\n # compute the list of unimportant coordinates:\n not_kernel = np.logical_not(kernel)\n not_indexes_y, not_indexes_x = np.where(not_kernel == 1)\n not_indexes = list(zip(not_indexes_y, not_indexes_x))\n\n for current_x in range(x_min, x_max):\n if current_x - int(kernel_side / 2) >= 0 and current_x + int(kernel_side / 2) < x_max:\n for current_y in range(y_min, y_max):\n if current_y - int(kernel_side / 2) >= 0 and current_y + int(kernel_side / 2) < y_max:\n result[current_y, current_x] = apply_dilation_kernel(image_matrix, kernel, current_x, current_y,\n not_indexes)\n return result\n\n\n\"\"\"\nProvide the union of the two matrices; Basically a logical or, where 1 is replaced by val_1 and \n0 is replaced by val_2;\nparam image_matrix_1: a numpy matrix representing a binary image\nparam image_matrix_2: same as above\nparam val_1: the integer that must be treated as 1 in logical_or\nparam val_2: the integer that must be treated as 0 in logical_or\n\"\"\"\ndef union(image_matrix_1, image_matrix_2, val_1, val_2):\n indexes_1_y, indexes_1_x = np.where(image_matrix_1 == val_1)\n indexes_2_y, indexes_2_x = np.where(image_matrix_2 == val_1)\n indexes_1 = np.array(list(zip(indexes_1_y, indexes_1_x)))\n indexes_2 = np.array(list(zip(indexes_2_y, indexes_2_x)))\n binary_1 = np.zeros(image_matrix_1.shape)\n binary_1[tuple(indexes_1.T)] = 1\n binary_2 = np.zeros(image_matrix_2.shape)\n binary_2[tuple(indexes_2.T)] = 1\n union = np.logical_or(binary_1, binary_2)\n indexes_3_y, indexes_3_x = np.where(union == True)\n indexes_3 = list(zip(indexes_3_y, indexes_3_x))\n union = np.ones(image_matrix_1.shape) * val_2\n union[tuple(np.array(indexes_3).T)] = val_1\n return union\n\n\n\n\"\"\"\nFill the contour from source_image with content (i.e. pixels)\nparam source_image: a numpy matrix, representing an image that contains a closed line (i.e. 
the contour)\n to be filled\nparam start_point_y: an integer representing the ordinate of the starting point\nparam start_point_x: an integer representing the abscissa of the starting point\n\"\"\"\ndef flood_fill(source_image, start_point_y, start_point_x):\n kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.uint8)\n # kernel = np.array([[255, 0, 255], [0, 0, 0], [255, 0, 255]], dtype=np.uint8)\n # kernel = np.array([[0, 255, 0], [255, 255, 255], [0, 255, 0]], dtype=np.uint8)\n\n\n # get the complement of the source_image:\n grayscale_image = cv.cvtColor(source_image, cv.COLOR_BGR2GRAY)\n res, inverted_binary_image = cv.threshold(grayscale_image, type=cv.THRESH_BINARY_INV, maxval=255, thresh=0)\n res2, binary_image = cv.threshold(grayscale_image, type=cv.THRESH_BINARY, maxval=255, thresh=0)\n\n # binary_aux = np.array(binary_image)\n binary_aux = np.ones(binary_image.shape) * 255\n binary_aux[start_point_y, start_point_x] = 0\n\n # do the flood fill:\n i = 0\n while True:\n cv.imwrite(\"..\\\\output\\\\binary_aux_image_\" + str(i) + \".png\", binary_aux)\n #cv.imshow(\" \", binary_aux)\n new_image = np.array(binary_aux, dtype=np.uint8)\n # dilated_image = dilate(new_image, kernel)\n dilated_image = cv.erode(new_image, kernel=kernel)\n # intersected_image = intersect(dilated_image, inverted_binary_image, 0, 255)\n intersected_image = cv.bitwise_or(dilated_image, inverted_binary_image)\n if np.array_equal(intersected_image, binary_aux):\n break\n binary_aux = intersected_image\n i += 1\n\n # new_image = np.array(binary_aux, dtype=np.uint8)\n # dilated_image = cv.erode(new_image, kernel)\n # cv.imwrite(\"..\\\\output\\\\binary_aux_image_1.png\", dilated_image)\n\n # return union(binary_aux, binary_image, 0, 255)\n return cv.bitwise_and(binary_image, binary_aux)\n\nstart = time.time()\nafter_flood_fill = flood_fill(raw_image, 48, 111)\nend = time.time()\ncv.imwrite(\"..\\\\output\\\\after_flood_fill.png\", after_flood_fill)\n\nprint(\"execution time in seconds: \", end - start)\n\n","sub_path":"application/lab1_solution2.py","file_name":"lab1_solution2.py","file_ext":"py","file_size_in_byte":7127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"74804263","text":"from scipy.io import arff\nfrom sklearn.svm import SVC\nimport pandas as pd\nimport numpy as np\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import classification_report, confusion_matrix\nimport filehelper\ndef trainNetwork():\n #data, meta = arff.loadarff(open('../audio/test_melspectogram.arff', 'rb'))\n with open('../audio/test_melspectogram.arff','r') as f:\n data, meta = arff.loadarff(f)\n print(data)\n df = pd.DataFrame(data)\n data=df.values\n X_train=data[:,0:13]\n y_train=data[:,13]\n svclassifier = SVC(gamma='auto')\n print(X_train.shape)\n print(y_train.shape)\n print(np.unique(y_train))\n svclassifier.fit(X_train, y_train)\n y_pred = svclassifier.predict(X_train)\n confusion_matrix(y_pred, y_train)\n print()\n print(\"-----------------Accuracy result--------------------\")\n print()\n s=classification_report(y_train, y_pred)\n print(s)\n filehelper.save_object(svclassifier,\"../audio/svm.model\")\n return s\n\n#trainNetwork()","sub_path":"python-code/glottal-pathalogy/train_glottal_dataset.py","file_name":"train_glottal_dataset.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"421346791","text":"import pygame\n\ndef star_game(screen):\n while True:\n 
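The loop in `flood_fill` above is the classic morphological region-filling iteration X_k = (X_{k-1} ⊕ B) ∩ A^c, run until X_k stops changing — phrased on inverted images, which is why `cv2.erode` and `bitwise_or` stand in for dilation and intersection. A hedged NumPy rendering of the textbook form on a tiny binary contour:

```python
import numpy as np
from scipy.ndimage import binary_dilation

# A: a closed square contour (True = boundary pixel) on a 7x7 grid.
A = np.zeros((7, 7), dtype=bool)
A[1:6, 1] = A[1:6, 5] = A[1, 1:6] = A[5, 1:6] = True

X = np.zeros_like(A)
X[3, 3] = True                        # seed strictly inside the contour
while True:
    grown = binary_dilation(X) & ~A   # X_k = dilate(X_{k-1}) ∩ A^c
    if (grown == X).all():
        break                         # converged: interior fully filled
    X = grown

print((X | A).astype(int))            # filled region = interior ∪ boundary
```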
for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if pygame.K_ESCAPE:\n print('ESCAPE')\n return\n screen.fill((0,255,0))\n pygame.display.update()","sub_path":"game/Prototipo/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"93375583","text":"#!/usr/bin/env python3\n\nimport rospy, cv2, numpy\nimport gym_duckietown\nfrom gym_duckietown.simulator import Simulator\nfrom duckietown.dtros import DTROS, NodeType\nfrom duckietown_msgs.msg import WheelsCmdStamped\nfrom sensor_msgs.msg import CompressedImage\nfrom cv_bridge import CvBridge\n\nclass SimulatorNode(DTROS):\n def __init__(self, node_name):\n super(SimulatorNode, self).__init__(\n node_name=node_name,\n node_type=NodeType.PERCEPTION\n )\n\n # Subscribers\n self.sub_wheels_cmd = rospy.Subscriber(\n \"/fakebot/wheels_driver_node/wheels_cmd\",\n WheelsCmdStamped,\n self.wheels_cmd_callback, \n queue_size=1\n )\n\n # Publishers\n self.pub_img = rospy.Publisher(\n \"/fakebot/camera_node/image/compressed\",\n CompressedImage,\n queue_size=1,\n )\n self.action = [0,0]\n\n def wheels_cmd_callback(self, msg_wheels_cmd):\n vel_left = msg_wheels_cmd.vel_left\n vel_right = msg_wheels_cmd.vel_right\n self.action = [vel_left,vel_right]\n\n def start(self):\n env = gym_duckietown.simulator.Simulator(\n seed=123, # random seed\n map_name=\"loop_empty\",\n max_steps=500001, # we don't want the gym to reset itself\n domain_rand=0,\n camera_width=640,\n camera_height=480,\n accept_start_angle_deg=4, # start close to straight\n full_transparency=True,\n distortion=True,\n ) \n bridge = CvBridge()\n while (True):\n action = self.action\n observation, reward, done, misc = env.step(action)\n env.render()\n if done:\n env.reset()\n #Generate the compressed image\n image =cv2.cvtColor(numpy.asarray(observation),cv2.COLOR_RGB2BGR)\n image_message = bridge.cv2_to_compressed_imgmsg(image, dst_format='jpeg')\n #Publish the compressed image\n self.pub_img.publish(image_message)\n #rospy.sleep(rospy.Duration.from_sec(0.001))\n \n \nif __name__ == '__main__':\n simulator_node = SimulatorNode(node_name='simulator_node')\n simulator_node.start()\n rospy.spin()\n","sub_path":"packages/my_package/src/simulator_node.py","file_name":"simulator_node.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"25917946","text":"from __future__ import absolute_import\n\nimport sys\n\nfrom alembic.config import Config\nfrom alembic.migration import MigrationContext\nfrom alembic.script import ScriptDirectory\n\nfrom ..lib.sqla import (\n get_metadata, get_session_maker, mark_changed)\n\n\ndef bootstrap_db(config_uri=None, with_migration=True):\n \"\"\"Bring a blank database to a functional state.\"\"\"\n\n db = get_session_maker()\n\n if with_migration:\n context = MigrationContext.configure(db().connection())\n db_version = context.get_current_revision()\n\n if db_version:\n sys.stderr.write('Database already initialized. 
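In `star_game` above, `if pygame.K_ESCAPE:` is always true — `pygame.K_ESCAPE` is just a nonzero key constant — so the loop exits on the first press of *any* key. The test needs to compare against `event.key`. A hedged corrected sketch:

```python
import pygame

def star_game(screen):
    while True:
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                return                 # leave only on the Escape key
        screen.fill((0, 255, 0))
        pygame.display.update()
```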
Bailing out.\\n')\n sys.exit(0)\n\n config = Config(config_uri)\n script_dir = ScriptDirectory.from_config(config)\n heads = script_dir.get_heads()\n\n if len(heads) > 1:\n sys.stderr.write('Error: migration scripts have more than one '\n 'head.\\nPlease resolve the situation before '\n 'attempting to bootstrap the database.\\n')\n sys.exit(2)\n\n import assembl.models\n get_metadata().create_all(db().connection())\n\n # Clean up the sccoped session to allow a later app instantiation.\n if with_migration and heads:\n context = MigrationContext.configure(db().connection())\n context._update_current_rev(db_version, heads[0])\n return db\n\n\ndef bootstrap_db_data(db):\n # import after session to delay loading of BaseOps\n from assembl.models.auth import (\n populate_default_permissions, populate_default_roles)\n populate_default_permissions(db())\n populate_default_roles(db())\n mark_changed()\n\n\ndef ensure_db_version(config_uri, session_maker):\n \"\"\"Exit if database is not up-to-date.\"\"\"\n config = Config(config_uri)\n script_dir = ScriptDirectory.from_config(config)\n heads = script_dir.get_heads()\n\n if len(heads) > 1:\n sys.stderr.write('Error: migration scripts have more than one head.\\n'\n 'Please resolve the situation before attempting to '\n 'start the application.\\n')\n sys.exit(2)\n else:\n repo_version = heads[0] if heads else None\n\n context = MigrationContext.configure(session_maker()().connect())\n db_version = context.get_current_revision()\n\n if not db_version:\n sys.stderr.write('Database not initialized.\\n'\n 'Try this: \"assembl-db-manage %s bootstrap\".\\n'\n % config_uri)\n sys.exit(2)\n\n if db_version != repo_version:\n sys.stderr.write('Stopping: DB version (%s) not up-to-date (%s).\\n'\n % (db_version, repo_version))\n sys.stderr.write('Try this: \"assembl-db-manage %s upgrade head\".\\n'\n % config_uri)\n sys.exit(2)\n\n\ndef is_migration_script():\n \"\"\"Determine weather the current process is a migration script.\"\"\"\n return 'alembic' in sys.argv[0] or 'assembl-db-manage' in sys.argv[0]\n\n\ndef includeme(config):\n \"\"\"Initialize Alembic-related stuff at app start-up time.\"\"\"\n skip_migration = config.registry.settings.get('app.skip_migration')\n if not skip_migration and not is_migration_script():\n ensure_db_version(\n config.registry.settings['config_uri'], get_session_maker())\n","sub_path":"assembl/lib/migration.py","file_name":"migration.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"307824588","text":"import json, re, pickle\nimport numpy as np\n# To add delimiter to the result of split()\n# https://stackoverrun.com/ja/q/362655\n\nWORD_DELIMITER_LIST = r'[^0-9a-zA-Z]'\n\n\nclass GOentitydefinition2tensor():\n def __init__(self,word2iddict,GO_obo_jsonpath,delimiter_list,wordmatrix):\n self.word2id = word2iddict\n self.GO_obo_jsonpath = GO_obo_jsonpath\n self.delimiter_list = delimiter_list\n self.wordmatrix = wordmatrix\n\n def id2GO_id(self):\n id2GO_id = {}\n GO_id2id = {}\n id2entitydef = {}\n with open(self.GO_obo_jsonpath,'r') as GOj:\n gojson = json.load(GOj)\n\n for id, desc in gojson.items():\n id2GO_id[id] = desc['id']\n GO_id2id[desc['id']] = id\n\n id2entitydef[id] = desc['def']\n\n return id2GO_id, GO_id2id, id2entitydef\n\n def entity_def_splitter(self,one_entity_def_sentence):\n # [^[:alnum:]](if exists.[[^:alnum:]])\n splitted_sentence = re.split('(\\W+)',one_entity_def_sentence)\n return splitted_sentence\n\n def 
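`ensure_db_version` above compares the revision stamped in the database (via `MigrationContext`) against the migration repository's head and refuses to start on any mismatch. A hedged condensation of that check into one predicate, assuming an alembic config file and an SQLAlchemy URL (both paths hypothetical):

```python
from alembic.config import Config
from alembic.migration import MigrationContext
from alembic.script import ScriptDirectory
from sqlalchemy import create_engine

def db_is_current(config_uri, db_url):
    heads = ScriptDirectory.from_config(Config(config_uri)).get_heads()
    with create_engine(db_url).connect() as conn:
        db_rev = MigrationContext.configure(conn).get_current_revision()
    # Exactly one head, and the database is stamped with it.
    return len(heads) == 1 and db_rev == heads[0]
```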
entity_def_splitted_list2embeddings(self,entity_def_splitted_list):\n sentence2wordid = []\n for word in entity_def_splitted_list:\n if word in self.word2id:\n sentence2wordid.append(self.word2id[word])\n else:\n pass\n entity_embeddings = np.random.uniform(-np.sqrt(0.06), np.sqrt(0.06), (1,200))\n for one_word_id in sentence2wordid:\n #print(self.wordmatrix.shape)\n entity_embeddings += self.wordmatrix[one_word_id]\n\n return entity_embeddings / len(sentence2wordid)\n\n def entitymatrix_creator(self,id2GO_id,GO_id2id,id2entitydef):\n entity_embedding_matrix = np.random.uniform(-np.sqrt(0.06), np.sqrt(0.06), (len(id2GO_id), 200))\n for one_id in id2GO_id.keys():\n entity_embedding_matrix[int(one_id)] = self.entity_def_splitted_list2embeddings(entity_def_splitted_list=self.entity_def_splitter(one_entity_def_sentence=id2entitydef[one_id]))\n\n return entity_embedding_matrix\n\nif __name__ == '__main__':\n GO_obo_json_path = './craft2go_dataset_dir/go.obo.json'\n WORD2iddict_path = './../pretrained_embeddings/embeddings_BioAsQ/bioasq_allembedding.pkl'\n entity_embeddig_matrix_path = './craft2go_dataset_dir/entitymatrix.pkl'\n '''\n with open(bioasq_embeddingpath,'wb') as be:\n all_data_json = {'word2id':word2id,\n 'id2word':id2word,\n 'word2embed':word2embeddingdict,\n 'wordmatrix':wordmatrix}\n pickle.dump(all_data_json,be)\n '''\n with open(WORD2iddict_path,'rb') as Wp:\n all_data_json = pickle.load(Wp)\n word2id = all_data_json['word2id']\n wordmatrix = all_data_json['wordmatrix']\n\n GOentitydefinition2tensor = GOentitydefinition2tensor(word2iddict=word2id,\n GO_obo_jsonpath=GO_obo_json_path,\n delimiter_list=WORD_DELIMITER_LIST,\n wordmatrix=wordmatrix)\n\n id2GO_id, GO_id2id, id2entitydef = GOentitydefinition2tensor.id2GO_id()\n entity_embedding_matrix = GOentitydefinition2tensor.entitymatrix_creator(id2GO_id=id2GO_id,\n GO_id2id=GO_id2id,\n id2entitydef=id2entitydef)\n with open(entity_embeddig_matrix_path,'wb') as eemp:\n pickle.dump(entity_embedding_matrix,eemp)\n\n GO_id_json_recordpath = './craft2go_dataset_dir/go2id_and_entity2id.json'\n\n GO_iddata = {'id2GO_id':id2GO_id,\n 'GO_id2id':GO_id2id,\n 'id2entitydef':id2entitydef}\n\n with open(GO_id_json_recordpath,'w') as GOID:\n json.dump(GO_iddata, GOID, ensure_ascii=False, indent=4)\n\n","sub_path":"CRAFT2GO/GO_obo_entitydefinition_and_CRAFTeachmention2tensor.py","file_name":"GO_obo_entitydefinition_and_CRAFTeachmention2tensor.py","file_ext":"py","file_size_in_byte":3965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"204591166","text":"# -*- coding: utf-8 -*-\n\n#use AdamOptimizer (1-e4)\nimport collections\nimport tensorflow as tf \nimport cv2\nimport os\nimport numpy as np\nfrom numpy import *\n\nsize = 64\ntrain_nums = 210082\ntest_nums = 41826\nTrain_allcut = \"train/all_cut_64/\"\nTest_allcut = \"test/all_cut_64/\"\n\n#Train_good = \"train/good_cut/\"\n#Train_bad = \"train/bad_cut_filter/\"\n#Test_good = \"test/good_cut/\"\n#Test_bad = \"test/bad_cut_filter/\"\nprint(\"Initializing...\")\n\nbatch_size = 50\ntrain_images = array([[[[0 for i in range(3)] for j in range(size)] for k in range(size)] for l in range(batch_size)])\n#test_images = array([[[[0 for i in range(3)] for j in range(size)] for k in range(size)] for l in range(test_nums)])\n#labels: bad=0,1 good=1,0\ntrain_labels = array([[0 for i in range(2)] for k in range(batch_size)])\n#test_labels = array([[0 for i in range(2)] for k in range(test_nums)])\n\nDatasets = collections.namedtuple('Datasets', 
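`entity_def_splitted_list2embeddings` above averages the word vectors of the in-vocabulary tokens, but with two quirks: the running sum is seeded with a fresh random vector (so the same definition embeds slightly differently on every call), and the division by `len(sentence2wordid)` raises `ZeroDivisionError` when no token is in the vocabulary. A hedged deterministic variant:

```python
import numpy as np

def average_embedding(tokens, word2id, matrix, dim=200):
    ids = [word2id[t] for t in tokens if t in word2id]
    if not ids:                       # nothing in vocabulary: return zeros
        return np.zeros(dim)
    return matrix[ids].mean(axis=0)   # plain mean, no random seed term

vocab = {"cell": 0, "membrane": 1}
mat = np.array([[1.0, 2.0], [3.0, 4.0]])
print(average_embedding("the cell membrane".split(), vocab, mat, dim=2))
# -> [2. 3.]
```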
['train_images', 'train_labels'])\n\ncut_list = []\nfor img in os.listdir(Train_allcut):\n cut_list.append(img)\n\ncurrent = 0\ndef get_next_trainbatch():\n global batch_size\n global train_nums\n global current\n batch_realsize = batch_size\n if (train_nums - current) <= batch_size:\n batch_realsize = (train_nums - current)\n for i in range(batch_realsize):\n img = cut_list[current+i]\n train_images[i] = cv2.imread(Train_allcut+img)\n flag = img[-7:-4]\n if flag == \"bad\" :\n train_labels[i] = array([0,1])\n else:\n train_labels[i] = array([1,0])\n current = current + batch_realsize\n\n return Datasets(train_images=train_images, train_labels=train_labels)\n\n#def get_testbatch():\n# global test_nums\n# for i in range(test_nums):\n# img = cut_list[current+i]\n# train_images[i] = cv2.imread(Train_allcut+img)\n# flag = img[-7:-4]\n# if flag == \"bad\" :\n# train_labels[i] = array([0,1])\n# else: \n# train_labels[i] = array([1,0])\n# current = current + batch_realsize\n#\n# return Datasets(train_images=train_images, train_labels=train_labels)\n\nx = tf.placeholder(tf.float32, [None, 64,64,3]) #输入的数据占位符 64x64x3\ny_actual = tf.placeholder(tf.float32, shape=[None, 2]) #输入的标签占位符\nkeep_prob = tf.placeholder(\"float\") \n\n#定义一个函数,用于初始化所有的权值 W\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n#定义一个函数,用于初始化所有的偏置项 b\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n \n#定义一个函数,用于构建卷积层\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n#定义一个函数,用于构建池化层\ndef max_pool(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1], padding='SAME')\n\n#构建网络\nx_image = tf.reshape(x,[-1,64,64,3]) #转换输入数据shape,以便于用于网络中 64x64x3 -> 60x60x3\n\nW_conv1 = weight_variable([5, 5, 3, 64]) \nb_conv1 = bias_variable([64]) \nh_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) #第一个卷积层 60x60x64\nh_pool1 = max_pool(h_conv1) #第一个池化层 30x30x64\n\nW_conv2 = weight_variable([5, 5, 64, 128])\nb_conv2 = bias_variable([128])\nh_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) #第二个卷积层 30x30x128\nh_pool2 = max_pool(h_conv2) #第二个池化层 16x16x128\n\nW_conv3 = weight_variable([5, 5, 128, 256])\nb_conv3 = bias_variable([256])\nh_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3) #第二个卷积层 30x30x128\nh_pool3 = max_pool(h_conv3) #第二个池化层 16x16x128\n\nW_fc1 = weight_variable([8 * 8 * 128, 512])\nb_fc1 = bias_variable([512])\nh_pool3_flat = tf.reshape(h_pool3, [-1, 8*8*128]) #reshape成向量\nh_fc1 = tf.nn.relu(tf.matmul(h_pool3_flat, W_fc1) + b_fc1) #第一个全连接层\n\nh_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) #dropout层\n\nW_fc2 = weight_variable([512, 2])\nb_fc2 = bias_variable([2])\ny_predict=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2) #softmax层\n\ncross_entropy = -tf.reduce_sum(y_actual*tf.log(y_predict)) #交叉熵\ntrain_step = tf.train.GradientDescentOptimizer(1e-3).minimize(cross_entropy) #梯度下降法\ncorrect_prediction = tf.equal(tf.argmax(y_predict,1), tf.argmax(y_actual,1)) \naccuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\")) #精确度计算\nsess=tf.InteractiveSession() \nsess.run(tf.global_variables_initializer())\nprint(tf.shape(h_pool1))\n\nfor i in range(int(train_nums/batch_size) + 1):\n batch = get_next_trainbatch()\n if i%100 == 0: #训练100次,验证一次\n #train_acc = accuracy.eval(feed_dict={x:batch[0], y_actual: batch[1], keep_prob: 1.0})\n #print('step',i,'training accuracy',train_acc)\n pass\n train_step.run(feed_dict={x: batch[0], y_actual: batch[1], keep_prob: 
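The network in `deep_cancer3.py` above has a shape bug: `W_conv3` emits 256 feature maps, yet the flatten step reshapes `h_pool3` with `8 * 8 * 128`, so the graph fails once it runs (the inline size comments also don't match the SAME-padded 64 → 32 → 16 → 8 progression). A small helper makes the required dense-layer width explicit; a hedged sketch:

```python
def flatten_dim(side, n_pools, channels):
    """Spatial side after n_pools 2x2/stride-2 poolings, times channels."""
    for _ in range(n_pools):
        side = (side + 1) // 2    # SAME padding halves, rounding up
    return side * side * channels

# 64x64 input, three pools, 256 channels out of the last conv layer:
print(flatten_dim(64, 3, 256))    # -> 16384, i.e. 8*8*256, not 8*8*128
```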
0.5})\n\nexit(0)\ntest_acc=accuracy.eval(feed_dict={x: mnist.test.images, y_actual: mnist.test.labels, keep_prob: 1.0})\nprint(\"Final test accuracy\",test_acc)\n","sub_path":"deep_cancer3.py","file_name":"deep_cancer3.py","file_ext":"py","file_size_in_byte":5183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"613925411","text":"import json\nimport os\nfrom collections import defaultdict\nfrom typing import List\n\nfrom loguru import logger\nfrom scipy.stats import ttest_rel\n\nalpha = 0.05\n\npretty_map = {\n 'ndcg': 'NDCG',\n 'hr': 'HR',\n 'ppr-collab': 'PPR-COLLAB',\n 'ppr-joint': 'PPR-JOINT',\n 'ppr-kg': 'PPR-KG',\n 'item-knn': 'Item kNN',\n 'user-knn': 'User kNN',\n 'transe': 'TransE',\n 'transe-kg': 'TransE-KG',\n 'transh': 'TransH',\n 'transh-kg': 'TransH-KG',\n 'random': 'Random',\n 'top-pop': 'TopPop',\n 'svd': 'SVD',\n 'bpr': 'BPR',\n 'wtp-all_entities': 'All entities',\n 'wtp-all_movies': 'All movies',\n 'ntp-all_entities': 'All entities',\n 'ntp-all_movies': 'All movies',\n 'wtp-substituting-4-4': '4/4',\n 'wtp-substituting-3-4': '3/4',\n 'wtp-substituting-2-4': '2/4',\n 'wtp-substituting-1-4': '1/4',\n 'ntp-substituting-4-4': '4/4',\n 'ntp-substituting-3-4': '3/4',\n 'ntp-substituting-2-4': '2/4',\n 'ntp-substituting-1-4': '1/4',\n 'wtp-substituting-1-0': '1/4 (no DEs)',\n 'mf': 'MF',\n 'joint-mf': 'Joint-MF'\n}\n\n\ndef line():\n return '\\\\\\\\\\\\hline'\n\n\ndef pretty(item):\n return pretty_map.get(item, item)\n\n\ndef get_p_val(results_base, model, metric, k_value, a, b):\n # Get intersecting splits\n metrics = list()\n for experiment in [a, b]:\n model_splits_path = os.path.join(os.path.join(results_base, experiment), model)\n model_splits = sorted([file for file in os.listdir(model_splits_path) if file != 'params.json'])\n\n model_metrics = list()\n for split in model_splits:\n split_path = os.path.join(model_splits_path, split)\n\n with open(split_path, 'r') as fp:\n model_metrics.append(json.load(fp)[metric][k_value])\n\n metrics.append(model_metrics)\n\n # Cutoff to minimum length\n min_length = len(min(metrics, key=len))\n metrics = [measure[:min_length] for measure in metrics]\n\n # t-test\n return ttest_rel(*metrics)[1]\n\n\ndef generate_table(results_base, experiments: List[str], metric='hr', test=None, k_value='10'):\n n_columns = 1 + len(experiments)\n\n table = \"\"\"\\\\begin{table*}[ht!]\\n\\t\\\\centering\\n\"\"\"\n\n # For each experiment, get summary files\n experiment_summary = defaultdict(dict)\n models = set()\n for experiment in experiments:\n experiment_results = dict()\n results_path = os.path.join(results_base, experiment)\n\n for file in os.listdir(results_path):\n if not file.startswith('summary') or not file.endswith('.json'):\n continue\n\n with open(os.path.join(results_path, file), 'r') as fp:\n summary = json.load(fp)\n\n for key, values in summary.items():\n if key in experiment_results:\n logger.warning(f'Duplicate model {key} for {experiment}')\n\n continue\n\n experiment_results[key] = values\n models.add(key)\n\n experiment_summary[experiment] = experiment_results\n\n if not experiment_results:\n logger.error(f'No summaries for {experiment}')\n\n return\n\n models = sorted(models, key=pretty)\n\n # Add header\n column_layout = '|'.join('l' if not idx else 'c' for idx in range(n_columns))\n table += \"\\t\\\\begin{tabular}{\" + column_layout + \"}\\n\"\n\n # Add first row with model names\n table += \"\\t\\t\\\\hline\\n\"\n columns = [f'& {pretty(experiment)}' for experiment in 
experiments]\n\n table += \"\\t\\t\\\\multicolumn{1}{c|}{Models} \" + ' '.join(columns) + \"\\n\"\n table += \"\\t\\t\" + line() + \"\\n\"\n\n # Get model-major results\n model_results = defaultdict(dict)\n highest_experiment_mean = defaultdict(float)\n for experiment, summary in experiment_summary.items():\n for model in models:\n if model not in summary:\n continue\n\n mean = round(summary[model][metric][k_value]['mean'], 2)\n model_results[model][experiment] = {\n 'mean': mean,\n 'std': summary[model][metric][k_value]['std']\n }\n\n if mean > highest_experiment_mean[experiment]:\n highest_experiment_mean[experiment] = mean\n\n for idx, model in enumerate(models):\n result_list = []\n\n for experiment in experiments:\n if experiment not in model_results[model]:\n result_list.append(' & N/A')\n\n continue\n\n mean = model_results[model][experiment]['mean']\n std = model_results[model][experiment]['std']\n\n base = f'{mean:.2f} \\\\pm {std:.2f}'\n significant = ''\n if test and experiment != test:\n significant = '^*' if get_p_val(results_base, model, metric, k_value, test, experiment) < alpha else ''\n if mean >= highest_experiment_mean[experiment]:\n result_list.append(\" & $\\\\mathbf{\" + base + significant + \"}$\")\n else:\n result_list.append(\" & $\" + base + significant + \"$\")\n\n table += \"\\t\\t\"\n\n # If not the first model row, add new line\n if idx:\n table += \"\\\\\\\\\"\n table += \" \" + pretty(model) + ''.join(result_list) + \"\\n\"\n\n # Add footer\n table += \"\\t\\t\" + line() + \"\\n\"\n table += \"\\t\\\\end{tabular}\\n\"\n table += \"\\t\\\\caption{\" + metric.upper() + \"@\" + k_value + \".}\\n\"\n table += \"\\\\end{table*}\"\n\n return table\n","sub_path":"utility/table_generator.py","file_name":"table_generator.py","file_ext":"py","file_size_in_byte":5422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"180701448","text":"import re\n\n\ndef string_to_column_name(str):\n \"\"\" get a column name for a categorial variable.\n\n Some of the categorial variables in the featur extractors are not\n suitable for column names.\n \"\"\"\n if not isinstance(str, basestring):\n return str\n only_word = re.sub('\\W+', '_', str).lower()\n return re.sub('^_|_$', '', only_word)\n\n\ndef simple_describe(df):\n wantdrop = ['unique', 'top', 'freq', '25%', '50%', '75%', 'first', 'last']\n desc = df.describe(include='all').transpose()\n to_drop = set(wantdrop).intersection(desc.columns)\n desc.drop(to_drop, axis=1, inplace=True)\n desc['perc'] = desc['count'] / len(df) * 100\n return desc\n","sub_path":"lib/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"274091756","text":"from tools.codegen.model import *\nfrom tools.codegen.api.types import *\nimport tools.codegen.local as local\nfrom typing import Optional, Sequence, Union, List\n\n# This file describes the translation of JIT schema to the public C++\n# API, which is what people use when they call functions like at::add.\n#\n# Prominent characteristics of the C++ API:\n#\n# - dtype, layout, device and pin_memory are collected into\n# a single C++ type TensorOptions (the native functions API\n# also has this, but tensor options is really most relevant\n# for the C++ API; it makes calling kwarg factory functions\n# pleasant)\n#\n# - for 'use_c10_dispatcher: full' functions, optional tensors are\n# represented explicitly using 
c10::optional\n#\n# - defaulting lives here (in fact, the dispatcher is completely\n# oblivious of defaults!)\n#\n# BTW: policy on name collisions: we try not to have types with\n# collisions, but functions are fair game to collide\n\ndef name(func: FunctionSchema) -> str:\n name = str(func.name.name)\n if func.is_out_fn():\n name += '_out'\n return name\n\n# Translation of \"value types\" in JIT schema to C++ API type. Value\n# types look the same no matter if they are argument types or return\n# types. Returns None if the type in question is not a value type.\ndef valuetype_type(t: Type) -> Optional[str]:\n if isinstance(t, BaseType):\n if t.name == BaseTy.Tensor:\n return None\n elif t.name == BaseTy.int:\n return 'int64_t'\n elif t.name == BaseTy.float:\n return 'double'\n elif t.name == BaseTy.str:\n return 'std::string'\n elif t.name in [BaseTy.bool, BaseTy.QScheme, BaseTy.Scalar,\n BaseTy.ScalarType, BaseTy.Generator, BaseTy.Storage,\n BaseTy.Layout, BaseTy.Device, BaseTy.MemoryFormat,\n BaseTy.Dimname, BaseTy.Stream, BaseTy.ConstQuantizerPtr]:\n # These C++ names line up with their schema names\n return t.name.name\n else:\n raise AssertionError(f\"unsupported type: {t}\")\n elif isinstance(t, OptionalType):\n elem = valuetype_type(t.elem)\n if elem is None:\n return None\n return f\"c10::optional<{elem}>\"\n elif isinstance(t, ListType):\n if str(t.elem) == 'bool':\n assert t.size is not None\n return f\"std::array\"\n else:\n return None\n else:\n raise AssertionError(f\"unrecognized type {repr(t)}\")\n\n# Translation of types occuring in JIT arguments to a C++ argument type.\ndef argumenttype_type(t: Type, *, mutable: bool) -> str:\n # If it's a value type, do the value type translation\n r = valuetype_type(t)\n if r is not None:\n return r\n\n if isinstance(t, BaseType):\n if t.name == BaseTy.Tensor:\n if mutable:\n return 'Tensor &'\n else:\n return 'const Tensor &'\n else:\n raise AssertionError(f\"base type should have been value type {t}\")\n elif isinstance(t, OptionalType):\n if str(t.elem) == 'Tensor':\n if mutable:\n return 'Tensor &' # TODO: fix this discrepancy\n else:\n if local.use_c10_dispatcher().dispatcher_uses_new_style():\n return 'const c10::optional&'\n else:\n return 'const Tensor &'\n elem = argumenttype_type(t.elem, mutable=mutable)\n return f\"c10::optional<{elem}>\"\n elif isinstance(t, ListType):\n # TODO: remove these special cases, ArrayRef fallthrough works fine\n if str(t.elem) == 'int':\n return \"IntArrayRef\"\n elif str(t.elem) == 'Tensor':\n return \"TensorList\"\n elif str(t.elem) == 'Dimname':\n return \"DimnameList\"\n # TODO: do something reasonable about lists of optional tensors\n elif (not local.use_c10_dispatcher().dispatcher_uses_new_style()) and str(t.elem) == 'Tensor?':\n return \"TensorList\"\n elem = argumenttype_type(t.elem, mutable=mutable)\n # TODO: explicitly qualify namespace here\n return f\"ArrayRef<{elem}>\"\n else:\n raise AssertionError(f\"unrecognized type {repr(t)}\")\n\n# Translate a JIT argument into its C++ type\ndef argument_type(a: Argument) -> str:\n return argumenttype_type(a.type, mutable=a.is_write)\n\n# Translation of a (non-multi) return type from JIT to C++\ndef returntype_type(t: Type, *, mutable: bool) -> str:\n r = valuetype_type(t)\n if r is not None:\n return r\n\n if isinstance(t, BaseType):\n if t.name == BaseTy.Tensor:\n if mutable:\n return 'Tensor &'\n else:\n return 'Tensor'\n elif isinstance(t, ListType):\n elem = returntype_type(t.elem, mutable=mutable)\n assert t.size is None, f\"fixed size list 
returns not supported: {t}\"\n return f\"std::vector<{elem}>\"\n\n raise AssertionError(f\"unrecognized return type {t}\")\n\n# Translation of a single return to its C++ type\ndef return_type(r: Return) -> str:\n return returntype_type(r.type, mutable=r.is_write)\n\n# Translation of a full (possibly multi) return from JIT to its C++ type\ndef returns_type(rs: Sequence[Return]) -> str:\n if len(rs) == 0:\n return 'void'\n elif len(rs) == 1:\n return return_type(rs[0])\n else:\n args = ','.join(map(return_type, rs))\n return f'std::tuple<{args}>'\n\ndef return_names(f: NativeFunction) -> Sequence[str]:\n returns: List[str] = []\n for i, r in enumerate(f.func.returns):\n # If we have an inplace function, the return argument is\n # implicitly named self.\n # TODO: Consider incorporating this into the data model\n if f.func.name.name.inplace:\n assert i == 0, \"illegal inplace function with multiple returns\"\n name = 'self'\n # If we are out function, the name is the name of the\n # corresponding output function (r.name will get recorded\n # in field_name later.)\n elif f.func.is_out_fn():\n name = f.func.arguments.out[i].name\n # If the return argument is explicitly named...\n elif r.name:\n name_conflict = any(r.name == a.name for a in f.func.schema_order_arguments())\n if name_conflict and not f.func.is_out_fn():\n name = f'{r.name}_return'\n else:\n name = r.name\n # If there is no explicit name, we just name the output result,\n # unless it's a multi-return, in which case it's result0,\n # result1, etc (zero-indexed)\n else:\n name = 'result' if len(f.func.returns) == 1 else f'result{i}'\n returns.append(name)\n return returns\n\nJIT_TO_CPP_DEFAULT = {\n 'False': 'false',\n 'True': 'true',\n 'None': 'c10::nullopt', # UGH this one is type directed\n 'Mean': 'at::Reduction::Mean',\n '[]': '{}',\n 'contiguous_format': 'MemoryFormat::Contiguous',\n 'long': 'at::kLong',\n}\n\n# Convert a JIT default into C++ expression representing the default\ndef default_expr(d: str, t: Type) -> str:\n if d == 'None' and str(t) == 'Tensor?':\n return '{}'\n if isinstance(t, BaseType) and t.name is BaseTy.str:\n # Schema allows single quotes but C++ needs double\n if len(d) >= 2 and d[0] == \"'\" and d[-1] == \"'\":\n s = ''\n i = 1\n while i + 1 < len(d):\n if d[i] != '\\\\':\n if d[i] == '\"':\n s += '\\\\\"'\n else:\n s += d[i]\n i += 1\n else:\n if d[i + 1] == \"'\":\n s += \"'\"\n else:\n s += d[i:i + 2]\n i += 2\n\n return f'\"{s}\"'\n\n if isinstance(t, OptionalType):\n if d == 'None':\n return 'c10::nullopt'\n\n return default_expr(d, t.elem)\n\n if isinstance(t, ListType):\n if (d.startswith('[') and d.endswith(']')):\n return '{' + d[1:-1] + '}'\n elif t.size is None:\n # NOTE: Sized lists can have scalar defaults\n raise ValueError(f\"Expected a list default '[...]' but found: '{d}'\")\n\n return JIT_TO_CPP_DEFAULT.get(d, d)\n\n# Convert an argument into its C++ API form\n\ndef argument_not_this(\n a: Union[Argument, TensorOptionsArguments],\n) -> CppArgument:\n if isinstance(a, Argument):\n return CppArgument(\n type=argument_type(a),\n name=a.name,\n default=default_expr(a.default, a.type) if a.default is not None else None,\n argument=a,\n )\n elif isinstance(a, TensorOptionsArguments):\n default = None\n if all(x.default == \"None\" for x in a.all()):\n default = '{}'\n elif a.dtype.default == \"long\":\n default = 'at::kLong' # TODO: this is wrong\n return CppArgument(\n type='const TensorOptions &',\n name='options',\n default=default,\n argument=a,\n )\n else:\n assert_never(a)\n\ndef 
argument(\n a: Union[Argument, TensorOptionsArguments, SelfArgument],\n) -> Union[CppSingleArgumentPack, CppThisArgumentPack]:\n if isinstance(a, SelfArgument):\n return CppThisArgumentPack(argument=a, type=argument_type(a.argument))\n else:\n return CppSingleArgumentPack(argument_not_this(a))\n\ndef argument_faithful(\n a: Union[Argument, TensorOptionsArguments, SelfArgument],\n) -> CppArgumentPack:\n if isinstance(a, TensorOptionsArguments):\n return CppTensorOptionsArgumentPack(\n argument=a,\n dtype=argument_not_this(a.dtype),\n layout=argument_not_this(a.layout),\n device=argument_not_this(a.device),\n pin_memory=argument_not_this(a.pin_memory),\n )\n else:\n return argument(a)\n\ndef group_arguments(\n func: FunctionSchema, *, method: bool\n) -> Sequence[Union[Argument, TensorOptionsArguments, SelfArgument]]:\n args: List[Union[Argument, SelfArgument, TensorOptionsArguments]] = []\n args.extend(func.arguments.out)\n args.extend(func.arguments.pre_self_positional)\n if func.arguments.self_arg is not None:\n if method:\n args.append(func.arguments.self_arg)\n else:\n args.append(func.arguments.self_arg.argument)\n args.extend(func.arguments.post_self_positional)\n args.extend(func.arguments.pre_tensor_options_kwarg_only)\n if func.arguments.tensor_options is not None:\n args.append(func.arguments.tensor_options)\n args.extend(func.arguments.post_tensor_options_kwarg_only)\n return args\n","sub_path":"tools/codegen/api/cpp.py","file_name":"cpp.py","file_ext":"py","file_size_in_byte":10728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"107915893","text":"# -*- coding: utf-8 -*-\n###############################################################################\n#\n###############################################################################\n\nimport time\nfrom openerp import models, fields, api\nfrom openerp.exceptions import ValidationError\n\n\nclass AdmissionAnalysis(models.TransientModel):\n\n \"\"\" Admission Analysis Wizard \"\"\"\n _name = 'admission.analysis'\n\n course_id = fields.Many2one('op.course', 'Course', required=True)\n start_date = fields.Date(\n 'Start Date', default=time.strftime('%Y-%m-01'), required=True)\n end_date = fields.Date('End Date', required=True)\n\n @api.multi\n def print_report(self):\n start_date = fields.Date.from_string(self.start_date)\n end_date = fields.Date.from_string(self.end_date)\n if start_date > end_date:\n raise ValidationError(\"End Date cannot be set before Start Date.\")\n else:\n data = self.read(\n ['course_id', 'start_date', 'end_date'])[0]\n return self.env['report'].get_action(\n self, 'eduerp_admission.report_admission_analysis',\n data=data)\n\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"eduerp_admission/wizard/admission_analysis_wizard.py","file_name":"admission_analysis_wizard.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"456255646","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport re\nimport json\n\nfrom scrapy.http import Request\nfrom urllib import parse\nfrom datetime import datetime\nfrom HotSpider.utility import common\nfrom HotSpider.items import HotspiderItem\nfrom lxml import etree\n\n\nclass AlimamaSpider(scrapy.Spider):\n name = 'alimama'\n allowed_domains = ['www.alimama.com']\n start_urls = ['https://www.alimama.com/case_detail.htm']\n\n def parse(self, response):\n # https://www.alimama.com/case_detail.htm\n 
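# The case list is embedded in the page as a JS object literal rather than strict JSON,\n        # so the regex below extracts it and common.false_to_true_json (a project helper)\n        # normalises it before one item is emitted per case.\n        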
almm_item = HotspiderItem()\n\n false_json = re.findall(\".*?var data = (\\[.*\\]).*?return\", response.text, re.S)[0]\n true_json = common.false_to_true_json(false_json)\n # print(json.dumps(true_json, indent=4, ensure_ascii=False))\n for num, i in enumerate(true_json):\n almm_item[\"url\"] = response.url+\"?spm=#%s\"% str(num+1)\n almm_item[\"thumbnail\"] = i[\"banners\"][\"thumbnail\"]\n almm_item[\"created_at\"] = i[\"article\"][\"time\"]\n almm_item[\"title\"] = i[\"article\"][\"title\"]\n almm_item[\"brand\"] = i[\"name\"]\n almm_item[\"effect\"] = str(i[\"data\"])\n almm_item[\"crawl_time\"] = datetime.now()\n almm_item[\"source\"] = \"alimama\"\n almm_item[\"id\"] = common.string_to_md5(almm_item[\"url\"])\n almm_item[\"original\"] = i[\"article\"][\"content\"].replace(\"&&\", \"\\\"\")\n\n almm_item[\"supplier\"] = \"阿里妈妈营销产品\"\n root = etree.HTML(almm_item[\"original\"])\n almm_item[\"content\"] = \"\".join(root.xpath('string(.)').replace('.article-content pre{white-space: normal;}', ''))\n\n almm_item[\"img_list\"] =str(root.xpath(\"//img/@src\"))\n\n yield almm_item\n","sub_path":"HotSpider/HotSpider/spiders/alimama.py","file_name":"alimama.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"378429337","text":"# encoding=utf-8 #\n__author__ = 'Leon Lu'\n\nimport optparse,os,logging,sys\nfrom mysqlToolkit import schema,synctd,utils\n\nAPPLICATION_VERSION='0.1.0'\nAPPLICATION_NAME='MYSQL TOOLKIT TABLE DATA SYNC'\nLOG_FILENAME='mysqlToolkit_table_data_sync.log'\nDATE_FORMAT = \"%Y%m%d\"\nPATCH_TPL = \"\"\"--\n-- Table Data Sync %(app_version)s %(type)s\n-- Created: %(created)s\n-- Server Version: %(server_version)s\n-- Apply To: %(target_host)s/%(target_database)s\n--\n%(data)s\"\"\"\n\n# perm_functionInfo\n# mesg_template\n# comm_dictionary\n# fund_serialshowconfig\n# # loan_landlord_type\n\ndef parse_cmd_line(fn):\n def processer(*args,**kwargs):\n usage = \"\"\" %prog [options] \n source/target format: mysql://user:pass@host:port/database user,city,country\n \"\"\"\n description = \"\"\"A MySQL Table Data Synchronization Utility\"\"\"\n\n parser=optparse.OptionParser(usage=usage,description=description)\n parser.add_option('-V','--version',action='store_true',dest='show_version',default=False,help=('Show version and exit.'))\n parser.add_option('-R','--reversion',action='store_true',dest='version_filename',default=False,help=('increment the migration script version if a file with the same name already exists.'))\n parser.add_option('-O','--output-directory',dest='output_directory',default=os.getcwd(),help=('directory to write the migration scrips.The default is current working directory.Must use absolute path if provided.'))\n parser.add_option('-L','--log-directory',dest='log_directory',help=('set the directory to write the log to.Must use absolute path if provided.Default is output directory.Log filename is schemasync.log'))\n parser.add_option('-i','--ignore-op',dest='ignore_operations',help=(\"\"\"ignore spec.s operations when gerenating schema sync scripts:\n d - ignore delete records operations\n c - ignore insert records operations\"\"\"))\n\n options,args = parser.parse_args()\n\n if options.show_version:\n print(APPLICATION_VERSION)\n return 0\n\n if (not args) or (len(args)<2):\n parser.print_help()\n return 0\n\n return 
fn(*args,**dict(version_filename=options.version_filename,output_directory=options.output_directory,log_directory=options.log_directory,ignore_operations=options.ignore_operations))\n    return processer\n\n\ndef app(sourcedb='',targetdb='',table_list='',version_filename=False,output_directory=None,log_directory=None,ignore_operations=None):\n    logging.basicConfig(level=logging.DEBUG,format='%(levelname)s - [%(asctime)s]:%(message)s')\n    options={}\n    options['sourcedb']=sourcedb\n    options['targetdb']=targetdb\n    options['table_list']=table_list.split(',')\n    options['version_filename']=version_filename\n    options['output_directory']=output_directory\n    options['log_directory']=log_directory\n    options['ignore_operations']=ignore_operations\n\n    if not table_list:  # the old len(table_list)=='' compared an int to a string and was never True\n        logging.error('Error:tables must be specified for data sync.Exiting.')\n        return 1\n    if not os.path.isabs(output_directory):\n        logging.error('Error:Output directory must be an absolute path.Exiting.')\n        return 1\n    if not os.path.isdir(output_directory):\n        logging.error('Error:Output directory does not exist.Exiting.')\n        return 1\n    if not log_directory or not os.path.isdir(log_directory):\n        if log_directory:\n            logging.info('Log directory does not exist,writing log to {}'.format(output_directory))\n        log_directory=output_directory\n    if not sourcedb:\n        logging.error('Source database URL not provided.Exiting.')\n        return 1\n    if not targetdb:\n        logging.error('Target database URL not provided.Exiting.')\n        return 1\n    # data transformation filters\n    source_info=schema.parse_database_url(sourcedb)\n    if not source_info.get('protocol') or not source_info.get('protocol').upper()=='MYSQL':\n        logging.error('Source database must be mysql database,Exiting.')\n        return 1\n    if not source_info.get('db'):\n        logging.error('Source database not provided,Exiting.')\n        return 1\n    target_info=schema.parse_database_url(targetdb)\n    if not target_info.get('protocol') or not target_info.get('protocol').upper()=='MYSQL':\n        logging.error('Target database must be mysql database,Exiting.')\n        return 1\n    if not target_info.get('db'):\n        logging.error('Target database not provided,Exiting.')\n        return 1\n    source_obj=schema.DataBaseConnection()\n    source_obj.connect(sourcedb)\n    if source_obj.version < '5.0.0':\n        logging.error('Source database mysql version is too low,please update your mysql version.')\n        source_obj.close()\n        return 1\n    target_obj=schema.DataBaseConnection()\n    target_obj.connect(targetdb)\n    if target_obj.version<'5.0.0':\n        logging.error('Target database mysql version is too low,please update your mysql version.')\n        target_obj.close()\n        return 1\n\n    try:\n        filters = (lambda d:utils.REGEX_MULTI_SPACE.sub(' ',d), lambda d:utils.REGEX_DISTANT_SEMICOLIN.sub(';',d))  # explicit tuple of post-processing filters\n        # ctx = dict(app_version=APPLICATION_VERSION,\n        #            server_version=target_obj.version,\n        #            target_host=target_obj.host,\n        #            target_database=target_obj.selected.name,\n        #            created=datetime.datetime.now().strftime(TPL_DATE_FORMAT))\n        p_fname = utils.create_pnames(target_obj.db,date_format=DATE_FORMAT,prefix='data_sync_')\n\n        pBuffer = utils.PatchBuffer(name=os.path.join(output_directory,p_fname),filters=filters,tpl=None,ctx=None,version_filename=version_filename)\n\n        db_selected=False\n\n        for patch in synctd.sync_data(source_obj.databases[source_obj.db],target_obj.databases[target_obj.db],options):\n            if patch:\n                if not db_selected:\n                    pBuffer.write(source_obj.databases[source_obj.db].use()+'\\n')\n                    db_selected=True\n                pBuffer.write(patch+'\\n')\n        if not pBuffer.modified:\n            logging.info('No migration scripts written. 
mysql://{}/{} and mysql://{}/{} were in sync.'.format(source_obj.host,source_obj.db,target_obj.host,source_obj.db))\n else:\n try:\n pBuffer.save()\n logging.info(\"Sql file have been saved to '{}'.\".format(pBuffer.name))\n except OSError as e:\n pBuffer.delete()\n logging.error('Error occurred,{}'.format(e))\n return 1\n # rBuffer = utils.PatchBuffer(name=os.path.join(output_directory,r_fname),filters=filters,tpl=None,ctx=None,version_filename=version_filename)\n finally:\n if source_obj:\n source_obj.close()\n if target_obj:\n target_obj.close()\n return pBuffer.name\n\ndef main():\n try:\n parse_cmd_line(app)()\n except Exception as e:\n logging.error('MySQL Error:{}'.format(e.args[0]))\n sys.exit(1)\n except KeyboardInterrupt:\n logging.error('Sync Interrupted,Exiting.')\n sys.exit(1)\n\nif __name__ == '__main__':\n main()","sub_path":"mysqlToolkit/datasync.py","file_name":"datasync.py","file_ext":"py","file_size_in_byte":7329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"487948788","text":"from bypy import ByPy\nimport os, time\nfrom threading import Thread\nfrom wxpy import *\n\nbot = Bot(console_qr=True)\nbp = ByPy()\n\ndef img_push():\n bot.file_helper.send('=========20180623=========') \n for root,dirs,files in os.walk('images'):\n for file in files: \n bot.file_helper.send_image('images/'+file)\n os.remove('images/'+file)\n\ndef img_up():\n for root,dirs,files in os.walk('images'):\n while len(files) > 0:\n file = files[0]\n files.pop(0)\n try:\n bp.upload('images/'+file, 'pixiv_ranking/%s/' % time.strftime(\"%Y%m%d\", time.localtime()))\n # print(file + ' successed!')\n except:\n pass\n img_push()\n\ndef main():\n tlist = []\n for i in range(50):\n t = Thread(target=img_up)\n tlist.append(t)\n\n for i in range(50):\n tlist[i].start()\n\n for i in range(50):\n tlist[i].join()\n\nif __name__ == '__main__':\n main()","sub_path":"ProjectCode/PixivRanking/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"482679049","text":"from django.urls import path\nfrom authentication.views import RegisterView, VerifyEmail, LoginAPIView\nfrom rest_framework_simplejwt.views import (\n TokenRefreshView,\n)\n\nurlpatterns = [\n path('register/', RegisterView.as_view(), name=\"register\"),\n path('login/', LoginAPIView.as_view(), name=\"login\"),\n path('email-verify/', VerifyEmail.as_view(), name=\"email-verify\"),\n path('token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),\n\n]\n","sub_path":"authentication/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"172994088","text":"from flask import Blueprint, jsonify, request\nfrom ..models import db, Wishlist, User, Book, Author\nfrom .errors import bad_request, invalid_get_target\n\n# Set up a Blueprint\nmod = Blueprint('api', __name__)\n\n\n@mod.route('/')\ndef index():\n return {\"message\": \"Hi. 
This is the API section\"}\n\n\n# #### Users #####\n\n\n@mod.route('/users', methods=['GET'])\ndef get_users():\n    \"\"\" Return all users in our database\"\"\"\n    users = db.session.query(User).all()\n    response = {\n        'users': [u.to_dict() for u in users]\n    }\n    return jsonify(response)\n\n\n@mod.route('/users/<int:id>', methods=['GET'])\ndef get_user(id):\n    \"\"\" Return a single, targeted user\"\"\"\n    try:\n        user = db.session.query(User).filter(User.id == id).first()\n        return jsonify(user.to_dict())\n    except AttributeError as e:\n        return invalid_get_target()\n\n\n@mod.route('/users', methods=['POST'])\ndef create_user():\n    \"\"\" Create a new user in our database\"\"\"\n    data = request.get_json() or {}\n    if 'first_name' not in data or 'last_name' not in data or 'email' not in data or 'password' not in data:\n        return bad_request('missing required fields: first_name, last_name, email, password')\n    if db.session.query(User).filter_by(email=data['email']).first():\n        return bad_request('email taken. Please use another.')\n    user = User()\n    user.from_dict(data)\n    db.session.add(user)\n    db.session.commit()\n    response = jsonify(user.to_dict())\n    response.status_code = 201\n    return response\n\n\n@mod.route('/users/<int:id>', methods=['PUT'])\ndef update_user(id):\n    \"\"\" Modify an existing user in our database\"\"\"\n    data = request.get_json() or {}\n    user = db.session.query(User).filter(User.id == id).first()\n    if user is None:\n        return invalid_get_target()\n    if 'email' in data and db.session.query(User).filter_by(email=data['email']).first():\n        return bad_request('email taken. Please use another.')\n    user.from_dict(data)\n    db.session.commit()\n    return jsonify(user.to_dict())\n\n# #### Books #####\n\n\n@mod.route('/books', methods=['GET'])\ndef get_books():\n    \"\"\" Return all books in our database\"\"\"\n    books = db.session.query(Book).all()\n    response = {\n        'books': [b.to_dict() for b in books]\n    }\n    return response\n\n\n@mod.route('/books/<int:id>', methods=['GET'])\ndef get_book(id):\n    \"\"\" Return a single, targeted book\"\"\"\n    try:\n        book = db.session.query(Book).filter(Book.id == id).first()\n        return book.to_dict()\n    except AttributeError as e:\n        return invalid_get_target()\n\n\n@mod.route('/books', methods=['POST'])\ndef create_book():\n    \"\"\" Create a new book in our database\"\"\"\n    data = request.get_json() or {}\n    if 'title' not in data or 'author_id' not in data or 'isbn' not in data or 'year_published' not in data:\n        return bad_request('missing required fields: title, author_id, isbn, year_published')\n    if db.session.query(Book).filter_by(isbn=data['isbn']).first() or \\\n            db.session.query(Book).filter_by(title=data['title']).first():\n        return bad_request('That book already exists in this database.')\n    if db.session.query(Author).filter_by(id=data['author_id']).first() is None:  # .first without () was always truthy\n        return bad_request(\"That author's not in our system. Add the author first.\")\n    book = Book()\n    book.from_dict(data)\n    db.session.add(book)\n    db.session.commit()\n    response = jsonify(book.to_dict())\n    response.status_code = 201\n    return response\n\n\n# #### Authors #####\n\n\n@mod.route('/authors', methods=['GET'])\ndef get_authors():\n    \"\"\" Return all authors in our database\"\"\"\n    authors = db.session.query(Author).all()\n    response = {\n        'authors': [a.to_dict() for a in authors]\n    }\n    return response\n\n\n@mod.route('/authors/<int:id>', methods=['GET'])\ndef get_author(id):\n    \"\"\" Return a single, targeted author\"\"\"\n    try:\n        author = db.session.query(Author).filter(Author.id == id).first()\n        return author.to_dict()\n    except AttributeError as e:\n        return invalid_get_target()\n\n\n@mod.route('/authors', methods=['POST'])\ndef create_author():\n    data = request.get_json() or {}\n    if 'first_name' not in data or 'last_name' not in data:\n        return bad_request('missing required fields: first_name, last_name')\n    author = Author()\n    author.from_dict(data)\n    db.session.add(author)\n    db.session.commit()\n    response = jsonify(author.to_dict())\n    response.status_code = 201\n    return response\n\n\n# #### Wishlists #####\n\n\n@mod.route('/wishlists', methods=['GET'])\ndef get_wishlists():\n    wishlists = db.session.query(Wishlist).all()\n    response = {\n        'wishlists': [w.to_dict() for w in wishlists]\n    }\n    return response\n\n\n@mod.route('/users/<int:id>/wishlist/<int:book_id>', methods=['POST'])\ndef add_book_to_wishlist(id, book_id):\n    if not db.session.query(User).filter(User.id == id).first():\n        return bad_request(f\"A user with id {id} does not exist.\")\n    if not db.session.query(Book).filter(Book.id == book_id).first():\n        return bad_request(f\"A book with id {book_id} does not exist.\")\n    if db.session.query(Wishlist).filter(Wishlist.user_id == id, Wishlist.book_id == book_id).first():\n        return bad_request(\"That book is already on this reader's wishlist.\")\n    else:\n        wishlist = Wishlist()\n        wishlist.user_id = id\n        wishlist.book_id = book_id\n        db.session.add(wishlist)\n        db.session.commit()\n        response = jsonify(f\"Success! {book_id} has been added to {id}'s wishlist.\")\n        response.status_code = 201\n        return response\n\n\n@mod.route('/users/<int:id>/wishlist/<int:book_id>', methods=['DELETE'])\ndef remove_book_from_wishlist(id, book_id):\n    if not db.session.query(User).filter(User.id == id).first():\n        return bad_request(f\"A user with id {id} does not exist.\")\n    if not db.session.query(Book).filter(Book.id == book_id).first():\n        return bad_request(f\"A book with id {book_id} does not exist.\")\n    if not db.session.query(Wishlist).filter(Wishlist.user_id == id, Wishlist.book_id == book_id).first():\n        return bad_request(\"That book isn't on this reader's wishlist.\")\n    else:\n        db.session.query(Wishlist).filter(Wishlist.user_id == id, Wishlist.book_id == book_id).delete(synchronize_session=False)\n        db.session.commit()\n        response = jsonify(f\"Success! 
{book_id} has been removed from {id}'s wishlist.'\")\n response.status_code = 201\n return response\n","sub_path":"wishlist_app/application/api/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":6492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"26309047","text":"from flask_restful import Resource, reqparse\nfrom flask_jwt import jwt_required\n\nfrom models.item import Item\n\n\nclass ItemResource(Resource):\n TABLE_NAME = 'items'\n\n parser = reqparse.RequestParser()\n parser.add_argument('price',\n type=float,\n required=True,\n help=\"This field cannot be left blank!\"\n )\n\n @jwt_required()\n def get(self, name):\n item = Item.find_by_name(name)\n if item:\n return item.json()\n return {'message': 'Item not found'}, 404\n\n @jwt_required()\n def post(self, name):\n if Item.find_by_name(name):\n return {'message': \"An item with name '{}' already exists.\".format(name)}\n\n data = ItemResource.parser.parse_args()\n\n item = Item(name, data['price'])\n\n try:\n item.save_to_db()\n except:\n return {'message': 'An error occurred while inserting the item.'}, 500\n\n return item.json()\n\n @jwt_required()\n def delete(self, name):\n item = Item.find_by_name(name)\n\n if item:\n item.delete_from_db()\n return {'message': 'Item deleted.'}\n return {'message': 'Item does not exist.'}, 404\n\n @jwt_required()\n def put(self, name):\n data = ItemResource.parser.parse_args()\n item = Item.find_by_name(name)\n\n if item is None:\n item = Item(name, data['price'])\n else:\n item.price = data['price']\n item.save_to_db()\n\n return item.json()\n\n\nclass ItemListResource(Resource):\n TABLE_NAME = 'items'\n\n def get(self):\n # return {'items': [item.json() for item in Item.query.all()]}\n return {'items': list(map(lambda x: x.json(), Item.query.all()))}\n","sub_path":"resources/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"45981927","text":"# -*- coding:utf-8 -*-\n\narray = input()\narray = array.split()\nfor i in range(len(array)):\n array[i] = float(array[i])\n\nW,H,x,y,r = array[0],array[1],array[2],array[3],array[4]\n\nif x-r >= 0 and y+r <= H and x+r <= W and y-r >= 0:\n print('Yes')\nelse:\n print('No')","sub_path":"Python_codes/p02394/s856842825.py","file_name":"s856842825.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"549324045","text":"import logging\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import logout, login, authenticate\nfrom django.contrib.auth.hashers import make_password\nfrom django.conf import settings\nfrom django.core.paginator import Paginator, InvalidPage, EmptyPage, PageNotAnInteger\nfrom django.db.models import Count\nfrom .models import *\nfrom .forms import *\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse\nfrom users.models import Profile\nfrom utils.visit_info import change_info\n\nlogger = logging.getLogger('blog.views') # 日志器\n\n\n# 全局信息\ndef global_setting(request):\n # 配置media\n meida_url = settings.MEDIA_URL\n # 站点基本信息\n site_url = settings.SITE_URL # 站点地址\n site_name = settings.SITE_NAME # 站点名称\n site_desc = settings.SITE_DESC # 站点描述\n weibo_sina = settings.WEIBO_SINA # 新浪微博\n pro_email = settings.PRO_EMAIL # 邮箱\n pro_rss = settings.PRO_RSS # RSS订阅\n # 分类信息获取(导航数据)\n category_list = 
Category.objects.all()[:6]\n # 文章归档数据\n archive_list = Article.objects.distinct_date()\n # 标签云数据\n tag_list = Tag.objects.all()\n # 友情链接数据\n link_list = Links.objects.all()\n # 浏览排行\n click_count_list = Article.objects.order_by('-click_count')[:6]\n # 站长推荐\n is_recommend_list = Article.objects.order_by('-is_recommend')[:6]\n vm = VisitNumber.objects.first()\n\n return locals()\n\n\ndef blog_index(request):\n # 博客首页\n try:\n change_info(request)\n article_list = Article.objects.all()\n # 分页\n article_list = get_page(request, article_list)\n except Exception as e:\n logger.error(e)\n return render(request, 'blog/blog_index.html', locals())\n\n\ndef category(request, category):\n try:\n try:\n category = Category.objects.get(id=category)\n except Category.DoesNotExist:\n return render(request, 'blog/failure.html', {'reason': '分类不存在'})\n article_list = Article.objects.filter(category=category)\n article_list = get_page(request, article_list)\n except Exception as e:\n print(e)\n return render(request, 'blog/blog_index.html', locals())\n\n\ndef archive(request):\n try:\n # 先获取客户端提交的信息\n year = request.GET.get('year', None)\n month = request.GET.get('month', None)\n # 获取文章列表并分页\n article_list = Article.objects.filter(\n date_publish__icontains=year + '-' + month)\n article_list = get_page(request, article_list)\n except Exception as e:\n logger.error(e) # 异常捕获,写进日志器\n return render(request, 'blog/archive.html', locals())\n\n\n# 按标签查询对应的文章列表\ndef tag(request, tag):\n try:\n tag = Tag.objects.get(name=tag)\n article_list = tag.article_set.all()\n # 分页\n article_list = get_page(request, article_list)\n except Exception as e:\n logger.error(e)\n return render(request, 'blog/blog_index.html', locals())\n\n\n# 分页代码\ndef get_page(request, article_list):\n paginator = Paginator(article_list, 10)\n try:\n page = int(request.GET.get('page', 1))\n article_list = paginator.page(page)\n except (EmptyPage, InvalidPage, PageNotAnInteger):\n article_list = paginator.page(1) # 出现异常就返回第一页\n return article_list\n\n\n# 文章详情\ndef article_detail(request, article_id):\n try:\n try:\n article = Article.objects.get(pk=article_id)\n except Article.DoesNotExist:\n return render(request, 'blog/failure.html', {'reason': '没有找到对应的文章'})\n\n # 评论表单\n comments = ArticleUser.objects.filter(article_id=article_id).exclude(comment=None)\n\n # comment_list = []\n # for comment in comments:\n # for item in comment_list:\n # if not hasattr(item, 'children_comment'):\n # setattr(item, 'children_comment', [])\n # if comment.pid == item:\n # item.children_comment.append(comment)\n # break\n # if comment.pid is None:\n # comment_list.append(comment)\n\n article.views += 1\n article.save()\n\n except Exception as e:\n print(e)\n logger.error(e)\n\n return render(request, 'blog/article_detail.html', locals())\n\n\n@csrf_exempt\ndef comment_post(request):\n if request.method == 'POST':\n content = request.POST.get('content')\n article_id = request.POST.get('article_id')\n\n try:\n user_profile = Profile.objects.get(user=request.user)\n except Exception as e:\n print(e)\n user_profile = Profile.objects.create(user=request.user)\n\n try:\n ArticleUser.objects.create(comment=content, article_id=article_id, user=request.user)\n user_profile.point = int(user_profile.point) + 1\n user_profile.save()\n return HttpResponse('{\"status\":\"success\"}', content_type='application/json')\n except Exception as e:\n print(e)\n return HttpResponse('{\"status\":\"fail\"}', content_type='application/json')\n\n\n# 注销\ndef do_logout(request):\n try:\n 
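# django.contrib.auth.logout() flushes the session; the try/except below only guards\n        # against an unexpected backend failure so the redirect still happens.\n        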
logout(request)\n except Exception as e:\n print(e)\n \n return redirect(request.META['HTTP_REFERER'])\n\n\n# 注册\ndef do_reg(request):\n try:\n if request.method == 'POST':\n reg_form = RegForm(request.POST)\n if reg_form.is_valid():\n # 注册\n user = User.objects.create(username=reg_form.cleaned_data[\"username\"],\n email=reg_form.cleaned_data[\n \"email\"],\n url=reg_form.cleaned_data[\"url\"],\n password=make_password(reg_form.cleaned_data[\"password\"]),)\n user.save()\n\n # 登录\n # 指定默认的登录验证方式\n user.backend = 'django.contrib.auth.backends.ModelBackend'\n login(request, user)\n return redirect(request.POST.get('source_url'))\n else:\n return render(request, 'blog/failure.html', {'reason': reg_form.errors})\n else:\n reg_form = RegForm()\n except Exception as e:\n pass\n return render(request, 'blog/reg.html', locals())\n\n\n# 登录\ndef do_login(request):\n try:\n if request.method == 'POST':\n login_form = LoginForm(request.POST)\n if login_form.is_valid():\n # 登录\n username = login_form.cleaned_data[\"username\"]\n password = login_form.cleaned_data[\"password\"]\n user = authenticate(username=username, password=password)\n if user is not None:\n # 指定默认的登录验证方式\n user.backend = 'django.contrib.auth.backends.ModelBackend'\n login(request, user)\n else:\n return render(request, 'blog/failure.html', {'reason': '登录验证失败'})\n return redirect(request.POST.get('source_url'))\n else:\n return render(request, 'blog/failure.html', {'reason': login_form.errors})\n else:\n login_form = LoginForm()\n except Exception as e:\n print(e)\n return render(request, 'blog/../templates/users/login.html', locals())\n\n\ndef sandboxie(request):\n return render(request, 'blog/sandboxie.html')\n\n\ndef search(request):\n q = request.GET.get('q')\n article_list = Article.objects.filter(title__icontains=q)\n article_list = get_page(request, article_list)\n\n return render(request, 'blog/blog_index.html', locals())","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"629266441","text":"from kivy.app import App\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.core.window import Window\n\nWindow.size = (400, 600)\nWindow.clearcolor = (1, 1, 1, 1)\n\n\nclass DemoApp(App):\n def build(self):\n return DemoLayout()\n\nclass DemoLayout(BoxLayout):\n def turn_on_lights(self, value):\n if value:\n Window.clearcolor = (1,1,1,1)\n self.switch_text.color = (0,0,0,1)\n else:\n Window.clearcolor = (0,0,0,1)\n self.switch_text.color = (1,1,1,1)\n\n def color_slide(self, value):\n self.slider_value.color = (value, value, value, 1)\n\n def spinner_clicked(self, text):\n print(text)\n\n def checked(self, active):\n print(active)\n\nif __name__ == \"__main__\":\n demo = DemoApp()\n demo.run()","sub_path":"Notes/kivyB/kivyWidgets/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"587071652","text":"import time\nimport asyncio\nimport aiofiles\nimport aiohttp\n\nURI = 'https://api.bf4stats.com/api/onlinePlayers?output=json'\nMAX_CLIENTS = 500\n\n\nasync def fetch_async(pid):\n print('Fetch async process {} started'.format(pid))\n start = time.time()\n response = await aiohttp.request('GET', URI)\n datetime = response.headers.get('Date')\n response.close()\n async with aiofiles.open(str(pid) + '.out', mode='w') as f:\n await f.write(datetime)\n print('Process {}: {}, 
took: {:.2f} seconds'.format(\n pid, datetime, time.time() - start))\n return datetime\n\n\nasync def asynchronous():\n start = time.time()\n # Ensure future creates an event loop for us instead of doing asyncio.get_event_loop()\n tasks = [asyncio.ensure_future(fetch_async(i)) for i in range(1, MAX_CLIENTS + 1)]\n await asyncio.wait(tasks)\n print(\"TOTAL TIME TAKEN : {:.2f} seconds\".format(time.time() - start))\n\nprint('Fetching urls concurrently : \\n')\nioloop = asyncio.get_event_loop()\nioloop.run_until_complete(asynchronous())\nioloop.close()\n","sub_path":"concurrency_vs_parallelism/30min/8b_concurrent_httpcall.py","file_name":"8b_concurrent_httpcall.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"409166535","text":"from datetime import date, datetime\n\n\nclass BirthdayDate(object):\n date_of_birth = None\n\n def __init__(self, date_string):\n self.date_of_birth = date_string\n\n def get_future_date(self, age):\n date_object = datetime.strptime(self.date_of_birth, '%Y-%m-%d')\n future_date = date(\n date_object.year + int(age),\n date_object.month,\n date_object.day\n )\n return future_date\n\n def get_weekday_string(self, weekday_number):\n weekdays = [\n 'Monday',\n 'Tuesday',\n 'Wednesday',\n 'Thursday',\n 'Friday',\n 'Saturday',\n 'Sunday',\n ]\n return weekdays[weekday_number - 1]\n\n def get_birthday_weekday(self, age):\n future_date = self.get_future_date(age)\n weekday_number = future_date.isoweekday()\n return self.get_weekday_string(weekday_number)\n","sub_path":"code/birth_date.py","file_name":"birth_date.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"169865692","text":"from flask import Flask\nfrom flask import request\nfrom flask_restful import Resource, Api\nfrom flask_restful import reqparse\nfrom flaskext.mysql import MySQL\n\nmysql = MySQL()\napp = Flask(__name__)\n\n# MySQL configurations\napp.config['MYSQL_DATABASE_USER'] = 'root'\napp.config['MYSQL_DATABASE_PASSWORD'] = ''\napp.config['MYSQL_DATABASE_DB'] = 'itemlistdb'\napp.config['MYSQL_DATABASE_HOST'] = 'localhost'\n\nmysql.init_app(app)\napi = Api(app)\n\nclass GetUser(Resource):\n\tdef get(self):\n\t\ttry:\n\t\t\t# Parse the arguments\n\t\t\tparser = reqparse.RequestParser()\n\t\t\tparser.add_argument('name', type=str, help='Name of the user')\n\t\t\t#parser.add_argument('password', type=str, help='Password to create user')\n\t\t\targs = parser.parse_args()\n\n\t\t\t_userName = args['name']\n\t\t\t#_userPassword = args['password']\n\n\t\t\tconn = mysql.connect()\n\t\t\tcursor = conn.cursor()\n\n\t\t\tcursor.callproc('spGetUser',(_userName,))\n\t\t\tdata = cursor.fetchall()\n\t\t\tif (len(data)>0):\n\t\t\t\treturn {'StatusCode':'200','Message': 'User Exists'}\n\t\t\telse:\n\t\t\t\treturn {'StatusCode':'1000','Message': 'User not found'}\n\n\t\texcept Exception as e:\n\t\t\treturn {'error': str(e)}\n\napi.add_resource(GetUser, '/GetUser')\n\nif __name__ == '__main__':\n\tapp.run(debug=True)\n\n","sub_path":"4th sem/pm/project/RestAPI/get_api.py","file_name":"get_api.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"224484495","text":"# coding=utf8\n\nimport logging\nfrom logging.config import dictConfig\nfrom time import time\nfrom pytz import timezone, utc\nfrom dateutil import parser\nfrom datetime import 
datetime\nimport math\nimport redis\nfrom config import Config\n\ntz = timezone('Asia/Shanghai')\npool = redis.ConnectionPool(host=Config['redis']['host'], port=Config['redis']['port'], decode_responses=True)\n\n\nclass TimeMarker:\n    def __init__(self):\n        # instance-level dict: a class-level mark_map would be shared by every TimeMarker\n        self.mark_map = {}\n\n    def mark(self, key=\"\"):\n        self.mark_map[key] = time()\n\n    def ttl(self, from_key=\"\"):\n        if from_key in self.mark_map:\n            return time() - self.mark_map[from_key]\n\n    def print_ttl(self, from_key=\"\"):\n        print(\"[{from_key}] Time Usage: {t} s\".format(from_key=from_key, t=self.ttl(from_key)))\n\n    def clear(self):\n        self.mark_map.clear()\n\n\ndef time_now():\n    return int(time())\n\n\ndef datetime_now(format='%Y-%m-%d %H:%M:%S'):\n    return datetime.now(tz=tz).strftime(format)\n\n\ndef timestamp2date(timestamp, format='%Y-%m-%d %H:%M:%S'):\n    return datetime.fromtimestamp(int(timestamp), tz=tz).strftime(format)\n\n\ndef date2timestamp(date):\n    naive_date = parser.parse(date)\n    aware_date = tz.localize(naive_date)\n    epoch = (aware_date - datetime(1970, 1, 1, tzinfo=utc)).total_seconds()\n    return int(epoch)\n\n\ndef timestamp_today():\n    date_now = datetime_now(format='%Y-%m-%d')\n    return date2timestamp(date_now)\n\n\ndef str2array(string, delimiter=',', element_type=None):\n    # build a new list: the old remove-while-enumerating loop skipped elements after an empty field\n    arr = [v for v in string.strip(delimiter).split(delimiter) if v != '']\n    if element_type:\n        arr = [element_type(v) for v in arr]\n    return arr\n\n\ndef array2str(array, delimiter=','):\n    # join a stringified copy instead of mutating the caller's list in place\n    return delimiter.join(str(v) for v in array)\n\n\ndef logger(name=''):\n    dictConfig({\n        \"version\": 1,\n        \"formatters\": {\n            \"f\": {\"format\": \"[%(asctime)s %(levelname)-8s %(name)-8s]: %(message)s\"}\n        },\n        \"handlers\": {\n            \"h\": {\"formatter\": \"f\", \"class\": \"logging.StreamHandler\"}\n        },\n        \"root\": {\"handlers\": [\"h\"], \"level\": \"DEBUG\"}\n    })\n    logging.basicConfig(format=\"[%(asctime)s %(levelname)-8s %(name)-8s]: %(message)s\",\n                        # '[%(asctime)s][%(levelname)s]: %(message)s',\n                        datefmt=\"%H:%M:%S\",\n                        filename=\"receive.log\",\n                        level=logging.DEBUG)\n    # logger = logging.getLogger(name=name)\n    # logging.basicConfig(level=logging.INFO,\n    #                     format='\"format\": \"[%(asctime)s ] %(levelname)-8s %(name)-8s: %(message)s\"',\n    #                     )\n\n    return logging\n\n\n#存储文件下载进度\nclass SaveProgress(object):\n    def __init__(self):\n        self.rds = redis.Redis(connection_pool=pool)\n\n    def save_progress(self, frame_index, script_info):\n        try:\n            print('in save progress frame_index:',frame_index)\n            version = script_info['version']\n            filename = script_info['script_name']\n            frame_num = script_info['frame_num']\n            device_id = script_info['device_id']\n            s_or_f = script_info['s_or_f']\n            dtype = script_info['type']\n            progress = frame_index / frame_num * 100\n            \n            print('version:{},info:{},progress:{}'.format(version,script_info,progress))\n            #_version = self.rds.hget('downloads', filename+'_version')\n            #_progress = self.rds.hget('downloads', filename+'_progress')\n            \n            #print('redis get info:version--{},progress--{}'.format(_version,_progress))\n\n            #進度更新\n            #self.rds.hset('downloads', filename+'_version', version)\n            #self.rds.hset('downloads', filename+'_progress', progress)\n            self.rds.hset('downloads', filename, '++'+version+'++'+str(progress)+'++'+str(device_id)+\"++\" + s_or_f + \"++\" + str(dtype))\n\n            #test\n            allpro = self.rds.hgetall('downloads')\n            print('allpro:',allpro)\n\n        except Exception as e:\n            print(\"progress save failed:\", 
e)\n\n","sub_path":"app/util/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"618254324","text":"#coding:utf-8\n\nfrom wtforms_tornado import Form\nfrom wtforms.compat import iteritems\nfrom tornado.escape import native_str as _str, json_decode\nfrom functools import wraps\n\ndef dict_native_str(old_dict):\n return {_str(name): [value,] for name, value in old_dict.iteritems()}\n\ndef parse_argument(Form):\n def deco(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n if self.request.arguments or self.request.method in ('GET',):\n self.form = Form(self.request.arguments)\n else:\n arguments = dict_native_str(json_decode(self.request.body))\n self.form = Form(arguments)\n if self.form.validate():\n self.arguments = self.form.data\n return func(self, *args, **kwargs)\n else:\n raise Exception(self.form.errors)\n return wrapper\n return deco\n\nclass FixedForm(Form):\n def __init__(self, *args, **kwargs):\n self.formdata = args[0]\n super(Form, self).__init__(*args, **kwargs)\n\n @property\n def data(self):\n def _(f):\n if f.data is None or f.data == '':\n return f.default\n return f.data\n return dict((name, _(f)) for name, f in iteritems(self._fields) if (name in self.formdata) or f.flags.required\n or f.default is not None)\n\ndef code(code, reason=None):\n ret = {'code': code}\n if reason:\n ret.update({'reason': reason})\n return ret\n\n\nclass Pagination(object):\n def __init__(self, entires, page_count, total):\n self.entires = entires\n self.page_count = page_count\n self.total = total\n\n\ndef paginate(ins, q, **kwargs):\n page = kwargs.pop('page')\n page_size = kwargs.pop('page_size')\n if page_size > 0:\n offset = (page - 1) * page_size\n else:\n offset = None\n if kwargs:\n q = q.filter_by(**kwargs)\n total = q.count()\n if offset:\n q = q.offset(offset)\n if page_size > 0:\n q = q.limit(page_size)\n entires = q.all()\n if (offset is not None) and (page_size is not None):\n page_count = total / page_size if total % page_size == 0 else total / page_size + 1\n else:\n page_count = 1\n return Pagination(entires, page_count, total)\n\ndef login_required(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n return func(self, *args, **kwargs)\n return wrapper\n\ndef not_none(value):\n if value is None:\n return ''\n else:\n if isinstance(value, str):\n return value.decode('utf-8')\n elif isinstance(value, unicode):\n return value\n else:\n return unicode(value)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"648593833","text":"from st_aggrid import AgGrid, GridOptionsBuilder, DataReturnMode, GridUpdateMode\nimport pandas as pd\n\ndef interactive_datatable(df):\n ''' \n Use st_aggrid to show well table in GUI with interactive sorting and filtering options.\n \n df: input dataframe\n \n Returns: dataframe of AgGrid output (including filtering via GUI)\n '''\n #set up aggrid display/interactivity options\n gb = GridOptionsBuilder.from_dataframe(df)\n gb.configure_default_column(groupable=True, value=True, enableRowGroup=True, aggFunc='sum', editable=False)\n gb.configure_selection(\"single\", use_checkbox=False, groupSelectsChildren=False, groupSelectsFiltered=False)\n gb.configure_grid_options(domLayout='normal')\n gb.configure_pagination(paginationAutoPageSize=True)\n gridOptions = gb.build()\n \n 
#set up data return/update modes\n return_mode_value = list(DataReturnMode.__members__)[1]\n update_mode_value = list(GridUpdateMode.__members__)[6]\n \n data_return_mode = return_mode_value\n \n grid_data = AgGrid(df, \n gridOptions=gridOptions,\n width='100%',\n data_return_mode=return_mode_value,\n update_mode=update_mode_value,\n fit_columns_on_grid_load=True)\n \n grid_data_df = pd.DataFrame(grid_data['data'])\n \n return grid_data,grid_data_df","sub_path":"my_first_app/aggrid.py","file_name":"aggrid.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"592837626","text":"'''\nCreated on Sep 9, 2010\n\n@author: broken\n'''\n\nfrom google.appengine.api import blobstore\nfrom DataFactory import dbPages\nfrom DataFactory import dbImageStore\nfrom DataFactory import dbUser\nfrom PageService import PageTemplates\nfrom PageService import Page\nfrom PageService.PageTypes import PageType\n\nimport Users\nimport Utils\nimport PageService\nimport Settings\nimport ImageStore\nimport logging\n\nclass GetHandler:\n def __init__(self, path, *args):\n self.pathList = Utils.parsePath(path[1])\n \n args[0].statusCode = args[1].getvalue('status')\n args[0].statusMessage = args[1].getvalue('message')\n \n if not Users.isUserAuthenticated():\n if not args[1].getvalue('status'):\n args[0].statusCode = '-1'\n args[0].statusMessage = 'You don\\' have access to this page'\n args[0].templateFile = 'edit/login.html'\n else:\n self.preparePage(*args)\n func = getattr(self, self.pathList[0])\n func(*args)\n \n def preparePage(self, view, query):\n pages = dbPages.Pages.gql('ORDER BY sortIndex').fetch(1000)\n view.currentUser = Users.getCurrentUser()\n view.currentPage = None\n \n view.pageTree = PageService.build_tree(pages)\n view.pages = pages\n view.settings = Settings\n\n def getPageData(self, view):\n if view.currentPage.templateType == 'PageService.PageTemplates.PageContainer':\n self.pathList[0] = 'pagecontainer'\n else:\n pageTemplateType = view.currentPage.templateType.split('.')[-1]\n pageTemplate = getattr(PageTemplates, pageTemplateType, None)\n view.pageTemplate = pageTemplate(page = view.currentPage)\n view.pageTemplate.addModules()\n view.imageList = dbImageStore.ImageStore.all()\n \n def main(self, view, query):\n view.templateTypes = Utils.getPageTemplates(PageTemplates, PageType)\n view.templateFile = 'edit/' + self.pathList[0] + '.html'\n \n def imageStore(self, view, query):\n if query.getvalue('imageId'):\n view.currentImage = dbImageStore.ImageStore.get_by_id(int(query.getvalue('imageId')))\n view.currentImageDescription = dbImageStore.ImageDescription.gql('WHERE imageEntry = :imageEntry', imageEntry = view.currentImage.key())\n \n view.uploadUrl = blobstore.create_upload_url('/edit/action/AddUpdateImageStore')\n view.imageList = dbImageStore.ImageStore.all()\n view.templateFile = 'edit/' + self.pathList[0] + '.html'\n \n def users(self, view, query):\n if Users.hasPremission(view, 3):\n if query.getvalue('userId'):\n view.currentUser = dbUser.User.get_by_id(int(query.getvalue('userId')))\n \n view.userList = dbUser.User.all()\n view.templateFile = 'edit/' + self.pathList[0] + '.html'\n \n \n def page(self, view, query):\n if query.getvalue('pageId'):\n view.currentPage = dbPages.Pages.get_by_id(int(query.getvalue('pageId')))\n self.getPageData(view)\n if query.getvalue('pageName'):\n view.currentPage = dbPages.Pages.get_by_key_name(query.getvalue('pageName'))\n self.getPageData(view)\n \n 
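# When neither pageId nor pageName is supplied, view.currentPage keeps the None set\n        # in preparePage and the template is expected to render an empty page.\n        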
view.templateFile = 'edit/' + self.pathList[0] + '.html'\n \n def logout(self, view, query):\n Users.doLogout()\n \n view.statusCode = '1'\n view.statusMessage = 'User has been logged out.'\n view.templateFile = 'edit/login.html'\n \nclass PostHandler:\n def __init__(self, path, *args):\n self.pathList = Utils.parsePath(path[1])\n func = getattr(self, self.pathList[1])\n func(*args) \n\n def login(self, view, post):\n view.StatusMessage = Users.doLogin(post.get('username'), post.get('password'))\n view.redirect = '/edit/?status=' + str(view.StatusMessage['status']) + '&message=' + view.StatusMessage['message']\n \n def AddUpdatePage(self, view, post):\n view.StatusMessage = Page.AddOrUpdate(post)\n if view.StatusMessage['pageId'] == 'None':\n view.redirect = '/edit/page/?pageName=' + view.StatusMessage['pageName'] + '&status=' + str(view.StatusMessage['status']) + '&message=' + view.StatusMessage['message']\n else:\n view.redirect = '/edit/page/?pageId=' + view.StatusMessage['pageId'] + '&status=' + str(view.StatusMessage['status']) + '&message=' + view.StatusMessage['message']\n \n def AddUpdateContent(self, view, post):\n view.StatusMessage = Page.AddUpdateContent(post)\n view.redirect = '/edit/page/?pageId=' + view.StatusMessage['pageId'] + '&status=' + str(view.StatusMessage['status']) + '&message=' + view.StatusMessage['message']\n \n def AddUpdatePageSettings(self, view, post):\n view.StatusMessage = Page.AddUpdatePageSettings(post)\n view.redirect = '/edit/page/?pageId=' + view.StatusMessage['pageId'] + '&status=' + str(view.StatusMessage['status']) + '&message=' + view.StatusMessage['message']\n \n def AddOrUpdateUser(self, view, post):\n view.StatusMessage = Users.AddOrUpdate(post)\n view.redirect = '/edit/users/?status=' + str(view.StatusMessage['status']) + '&message=' + view.StatusMessage['message']\n \n def DeleteUser(self, view, post):\n view.StatusMessage = Users.DeleteUser(post)\n view.redirect = '/edit/users/?status=' + str(view.StatusMessage['status']) + '&message=' + view.StatusMessage['message']\n \n def DeleteImage(self, view, post):\n view.StatusMessage = ImageStore.DeleteImage(post) \n view.redirect = '/edit/imageStore/?status=' + str(view.StatusMessage['status']) + '&message=' + view.StatusMessage['message'] \n \n def DeletePage(self, view, post):\n view.StatusMessage = Page.DeletePage(post)\n \n if view.StatusMessage['pageId'] == '0':\n view.redirect = '/edit/?status=' + str(view.StatusMessage['status']) + '&message=' + view.StatusMessage['message']\n else:\n view.redirect = '/edit/page/?pageId=' + view.StatusMessage['pageId'] + '&status=' + str(view.StatusMessage['status']) + '&message=' + view.StatusMessage['message']","sub_path":"PageController/EditView/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"296092600","text":"import token_hidden\nimport telebot\nimport premieres_parse\n\ndef log(message, answer):\n\tprint(\"\\n======================================\")\n\timport time\n\tprint('• Время:', time.strftime(\"%H:%M:%S | %d-%b-%Y\"))\n\tprint(\"• Автор: {} {} (id = {})\\n• Текст: {}\".format(message.from_user.first_name,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t message.from_user.last_name,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t str(message.from_user.id),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t message.text))\n\tprint(\"• Ответ: {}\".format(answer))\n\ndef main():\n\ttoken = token_hidden.token\n\tbot = telebot.TeleBot(token)\n\n\tprint(\"Бот 
запущен!\")\n\n\t@bot.message_handler(commands = [\"premieres_all\"])\n\tdef handle_text(message):\n\t\tbot.send_message(message.from_user.id, \"\\U0001F4C5 --- *Все премьеры на сегодня* --- \\U0001F4C5\", parse_mode = 'Markdown')\n\t\tbox = premieres_parse.premieres_all()\n\t\tfor movie_string in box:\n\t\t\tbot.send_message(message.from_user.id, movie_string,\n\t\t\t\t\t\t\t parse_mode = 'Markdown',\n\t\t\t\t\t\t\t disable_notification = True,\n\t\t\t\t\t\t\t disable_web_page_preview = True\n\t\t\t\t\t\t\t )\n\t\tbot.send_message(message.from_user.id, '\\U0001F3AC Выбирайте по настроению\\n\\U00002600 Приятного просмотра!')\n\t\tanswer = \"*{} сообщений*\".format(len(box))\n\t\tlog(message, answer)\n\n\n\t@bot.message_handler(commands = [\"premieres_this_week\"])\n\t\t\t\t\n\tdef handle_text(message):\n\t\tbot.send_message(message.from_user.id, premieres_parse.day(), parse_mode = 'Markdown')\n\t\tbox = premieres_parse.premieres_this_week()\n\t\timport re\n\t\timport requests\n\t\tfor movie_string in box:\n\t\t\ttry:\n\t\t\t\tmovie_id = re.findall(r\"film/(\\d{1,8})/\", movie_string)[0]\n\t\t\texcept IndexError:\n\t\t\t\tmovie_id = None\n\t\t\t\n\t\t\tif movie_id != None:\n\t\t\t\tposter_url = 'https://st.kp.yandex.net/images/film_iphone/iphone360_{}.jpg'.format(movie_id)\n\t\t\t\tstatus_code = requests.get(poster_url, allow_redirects = False).status_code\n\t\t\telse:\n\t\t\t\tstatus_code = None\n\t\t\tif status_code != None and status_code != 302:\n\t\t\t\tbot.send_photo(message.from_user.id, poster_url, caption = movie_string, parse_mode = 'Markdown', disable_notification = True)\n\t\t\telse:\n\t\t\t\tbot.send_message(message.from_user.id, movie_string, parse_mode = 'Markdown', disable_notification = True, disable_web_page_preview=True)\n\t\tbot.send_message(message.from_user.id, '\\U0001F3AC Выбирайте по настроению\\n\\U00002600 Приятного просмотра!')\n\t\tanswer = \"*{} сообщений*\".format(len(box))\n\t\tlog(message, answer)\n\n\n\t@bot.message_handler(content_types = [\"text\"])\n\tdef handle_text(message):\n\t\tanswer = '{} - {}'.format(message.from_user.first_name, message.text)\n\t\tbot.send_message(message.from_user.id, answer)\n\t\tlog(message, answer)\n\tbot.polling(none_stop = True, interval = 0)\n\nif __name__ == '__main__':\n\tmain()","sub_path":"premiere_almaty_bot/bot_test.py","file_name":"bot_test.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"366069272","text":"import numpy as np\nimport abc\nimport random\nfrom sklearn import preprocessing\n\nclass AnchorSelector(object):\n\tdef __init__(self, m,n):\n\t\tself.m = m\n\t\tself.n = n\n\n\t__metaclass__ = abc.ABCMeta\n\t@abc.abstractmethod\n\tdef anchor_select(self):\n\t\tpass\n\n\nclass RWalkAnchorSelector(AnchorSelector):\n\t\"\"\"随机游走锚点选择\"\"\"\n\tdef anchor_select(self,data,q,TransM):\n\t\tTransVU,TransUV = TransM\n\t\t#初始节点分布向量\n\t\tprobU,probV = np.ones((self.m,1)),np.ones((self.n,1))\t\n\t\tprobU[:],probV[:] = 1/self.m,1/self.n\n\t\twhile True:\n\t\t\talpha=0.8\n\t\t\tprobU_t = alpha*np.dot(TransVU,probV) + (1-alpha)/self.m\n\t\t\tprobV_t = alpha*np.dot(TransUV,probU) + (1-alpha)/self.n\n\t\t\tresidual = np.sum(abs(probU-probU_t))+np.sum(abs(probV-probV_t))\n\t\t\tprobU,probV = probU_t,probV_t\n\t\t\tif abs(residual)<1e-8:\n\t\t\t\tbreak \n\n\t\tpgu = [(i,j) for i,j in enumerate(probU)] #(id,pg_val)\t\n\t\tpgv = [(i,j) for i,j in enumerate(probV)]\n\t\tuanchor = sorted(pgu,reverse=True,key=lambda s: 
s[1])[:q]\n\t\tvanchor = sorted(pgv,reverse=True,key=lambda s: s[1])[:q]\n\t\t#print(\"anchoruser\",[i[0] for i in uanchor])\n\t\t#print(\"anchoritem\",[i[0] for i in vanchor])\n\t\trandom.shuffle(uanchor)\n\t\trandom.shuffle(vanchor)\n\t\tanchors = []\n\t\tfor m,n in zip(uanchor,vanchor): \n\t\t\tanchors.append((m[0],n[0]))\n\t\t#print(anchors)\n\n\t\treturn anchors","sub_path":"anchorselector.py","file_name":"anchorselector.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"369891085","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 1 07:28:37 2020\n\n@author: Cesar\n\"\"\"\n\nfrom datetime import datetime\nfrom copy import deepcopy\n\nimport wx\nimport wx.adv\nimport openpyxl\nimport smtplib\nfrom email.message import EmailMessage\n\n\ncol_requerimiento_auto=1\ncol_area_req=2\ncol_area=8\ncol_fecha_auto=3\ncol_cotizacion=4\ncol_nombrecliente=5\ncol_tipo_req=6\ncol_tipotransporte=7\ncol_contenedor=9\ncol_tipocont=10\ncol_requieredescargue=11\ncol_origen=12\ncol_destino=13\ncol_km=14\ncol_precio=15\ncol_recargotransporte=16\ncol_precio_recargo=17\ncol_nombreresponsable=18\ncol_telefono_resp=19\ncol_correo=20\ncol_nombresiso=21\ncol_telefono_siso=22\ncol_debeinfo=23\ncol_horasantes=24\ncol_fechaentrega=25\ncol_direccion=26\ncol_referenciacont=27\ncol_nombreconduc=28\ncol_cedula=29\ncol_telefonoconduc=30\ncol_placa=31\ncol_adiciones=32\ncol_preguntahoras=33\ncol_preguntadoc=34\ncol_consecutivo_tipo_req=35\n\n\nprincipal_color=wx.Colour(51, 102, 51)\nsecondary_color='white'\nyellow_color=(255, 203, 27)\n\npath_config='Config.xlsx'\nwb_config=openpyxl.load_workbook('Config.xlsx')\nsheet_config=wb_config['Config']\npath_db=sheet_config.cell(row=2,column=2).value\npath_remision=sheet_config.cell(row=3,column=2).value\npath_remision_A=sheet_config.cell(row=4,column=2).value\npath_remision_B=sheet_config.cell(row=5,column=2).value\npath_mov_dev_consecutivos=sheet_config.cell(row=22,column=2).value\npath_vta_alq_mod_consecutivos=sheet_config.cell(row=23,column=2).value\n\n##################################################### VENTANA INICIAL #############################################################################################################################\n\nclass MyFrame(wx.Frame):\n \n \n def OnKeyDown(self, event):\n \"\"\"quit if user press q or Esc\"\"\"\n if event.GetKeyCode() == 27 or event.GetKeyCode() == ord('Q'): #27 is Esc\n self.Close(force=True)\n \n else:\n event.Skip()\n \n def __init__(self):\n \n wx.Frame.__init__(self, None, wx.ID_ANY, \"Centro Logistico\", size=(575,290),style=wx.DEFAULT_FRAME_STYLE & ~(wx.RESIZE_BORDER | wx.MAXIMIZE_BOX)) \n self.Bind(wx.EVT_KEY_UP, self.OnKeyDown)\n self.SetBackgroundColour(secondary_color)\n self.panel = MainPanel(self)\n panel_font= wx.Font(10, wx.DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,underline=False,faceName=\"Folks-Normal\")\n self.panel.SetFont(panel_font)\n \n \n self.Center()\n ico = wx.Icon('Cont.ico', wx.BITMAP_TYPE_ICO)\n self.SetIcon(ico)\n self.fgs= wx.GridBagSizer(0,0)\n \n title_font= wx.Font(15, wx.DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,underline=False,faceName=\"Folks-Normal\")\n \n\n self.lbltitle =wx.StaticText(self.panel, label='Bienvenido al Centro Logistico de Contenedores de Antioquia')\n self.lbltitle.SetFont(title_font)\n \n self.lbltitle.SetBackgroundColour(secondary_color)\n self.lbltitle.SetForegroundColour(principal_color)\n self.fgs.Add(self.lbltitle,pos=(6,1),span=(1,5), 
flag=wx.LEFT | wx.ALIGN_CENTER, border=10)\n \n self.lbltitle2 =wx.StaticText(self.panel, label=' Que Desea Hacer ?')\n self.lbltitle2.SetFont(title_font)\n self.lbltitle2.SetBackgroundColour(secondary_color)\n self.lbltitle2.SetForegroundColour(principal_color)\n self.fgs.Add(self.lbltitle2,pos=(7,1),span=(1,5), flag=wx.LEFT | wx.ALIGN_CENTER, border=10)\n \n btn_nuevo_req = wx.Button(self.panel, id=wx.ID_ANY, size=(100,40), label=\"Nuevo\\nRequerimiento\")\n self.fgs.Add(btn_nuevo_req, pos=(9,2),span=(1,1),flag= wx.RIGHT| wx.ALIGN_CENTER| wx.EXPAND, border=20)\n btn_nuevo_req.Bind(wx.EVT_BUTTON, self.open_nuevo_req11)\n \n btn_logistico = wx.Button(self.panel, id=wx.ID_ANY,size=(100,40), label=\"Logistica\")\n self.fgs.Add(btn_logistico, pos=(9,3),span=(1,1), flag= wx.RIGHT | wx.ALIGN_CENTER| wx.EXPAND, border=20)\n btn_logistico.Bind(wx.EVT_BUTTON, self.open_logistica21)\n \n btn_imprimir = wx.Button(self.panel, id=wx.ID_ANY, size=(100,40),label=\"Imprimir\\nRemision\")\n self.fgs.Add(btn_imprimir, pos=(9,4),span=(1,1), flag= wx.RIGHT | wx.ALIGN_CENTER| wx.EXPAND, border=0)\n btn_imprimir.Bind(wx.EVT_BUTTON, self.open_imprimir11)\n\n mainSizer= wx.BoxSizer(wx.VERTICAL)\n mainSizer.Add(self.fgs,0, flag=wx.ALIGN_LEFT)\n self.panel.SetSizerAndFit(mainSizer)\n \n #-------------Button Functions-----------------#\n def open_nuevo_req11(self, event):\n ww_nuevo_requerimiento11(parent=self.panel).Show()\n \n\n def open_logistica21(self, event):\n ww_logistica21(parent=self.panel).Show()\n \n def open_imprimir11(self,evento):\n ww_remision11(parent=self.panel).Show()\n \n def configuracion(self, event):\n ww_configuracion(parent=self.panel).Show()\n\n #-------------Button Functions-----------------#\n\nclass MainPanel(wx.Panel):\n\n def __init__(self,parent):\n # create the panel\n wx.Panel.__init__(self, parent=parent)\n try:\n\n image_file = 'LOGOpng-01-100.png'\n bmp1 = wx.Image(\n image_file, \n wx.BITMAP_TYPE_ANY).ConvertToBitmap()\n # image's upper left corner anchors at panel \n # coordinates (0, 0)\n self.bitmap1 = wx.StaticBitmap(\n self, -1, bmp1, (190, 5))\n # show some image details\n #str1 = \"%s %dx%d\" % (image_file, bmp1.GetWidth(),\n #bmp1.GetHeight()) \n #parent.SetTitle(str1)\n except IOError:\n print (\"Image file %s not found\")\n raise SystemExit \n\n##################################################### ^ VENTANA INICIAL ^ #############################################################################################################################\n \n##################################################### NUEVO REQUERIMIENTO #############################################################################################################################\nclass ww_nuevo_requerimiento11(wx.Frame): \n \n def __init__(self,parent):\n \n wb_listas=openpyxl.load_workbook(path_config)\n req1_sheet=wb_listas['Requerimientos-1']\n \n areas=[]\n \n for cell in req1_sheet['A']:\n if cell.value != None:\n areas.append(cell.value)\n areas.pop(0)\n\n wx.Frame.__init__(self, None, wx.ID_ANY, \"Contenedores de Antioquia - Centro Logistico\", size=(250, 250),style=wx.DEFAULT_FRAME_STYLE & ~(wx.RESIZE_BORDER | wx.MAXIMIZE_BOX)) \n self.SetBackgroundColour(secondary_color)\n self.Center()\n try:\n self.panel=wx.Panel(self)\n panel_font= wx.Font(10, wx.DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,underline=False,faceName=\"Folks-Normal\")\n self.panel.SetFont(panel_font)\n self.panel.SetBackgroundColour(secondary_color)\n\n except IOError:\n print (\"Image file %s not found\" )\n raise 
SystemExit\n \n ico = wx.Icon('Cont.ico', wx.BITMAP_TYPE_ICO)\n self.SetIcon(ico)\n self.fgs= wx.GridBagSizer(0,0)\n \n title_font= wx.Font(11, wx.FONTFAMILY_DECORATIVE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,underline=False,faceName=\"Folks-Normal\")\n \n self.lbltitle =wx.StaticText(self.panel, label='Nuevo Requerimiento Por:')\n self.lbltitle.SetFont(title_font)\n self.lbltitle.SetBackgroundColour(secondary_color)\n self.lbltitle.SetForegroundColour(principal_color)\n self.fgs.Add(self.lbltitle,pos=(2,1),span=(1,3), flag=wx.ALL | wx.ALIGN_CENTER, border=5)\n\n self.combo_area = wx.ComboBox(self.panel,value=areas[0], choices=areas)\n self.fgs.Add(self.combo_area , pos=(4,1),span=(1,3), flag= wx.ALL |wx.ALIGN_CENTER, border=5)\n \n btn_aceptar = wx.Button(self.panel, id=wx.ID_ANY, label=\"Aceptar\",size=(-1,-1))\n self.fgs.Add(btn_aceptar, pos=(6,1),span=(1,3), flag= wx.ALL | wx.ALIGN_CENTER, border=0)\n btn_aceptar.Bind(wx.EVT_BUTTON, self.open_nuevo_req12)\n\n mainSizer= wx.BoxSizer(wx.VERTICAL)\n mainSizer.Add(self.fgs,0, flag=wx.ALIGN_CENTER)\n self.panel.SetSizerAndFit(mainSizer)\n \n #-------------Button Functions-----------------# \n def open_nuevo_req12(self, event):\n\n self.Destroy()\n area_req=self.combo_area.GetValue()\n ww_nuevo_requerimiento12(parent=self.panel,message=area_req).Show()\n\nclass ww_nuevo_requerimiento12(wx.Frame):\n\n def __init__(self,parent,message):\n wb_req=openpyxl.load_workbook(path_db)\n self.area_selec=message\n \n wb_listas=openpyxl.load_workbook(path_config)\n req2_sheet=wb_listas['Requerimientos-12']\n hist_req_sheet=wb_req['Requerimientos']\n\n self.lista_encargado=[]\n self.lista_cont=[]\n self.lista_tipo_transp=[]\n self.lista_descargue=[]\n self.lista_debe_enviarinfo=[]\n self.lista_nro_req=[]\n self.lista_tiporeq=[]\n self.lista_tipocont=[]\n \n for cell in req2_sheet['A']:\n if cell.value != None:\n self.lista_encargado.append(cell.value)\n for cell in req2_sheet['B']:\n if cell.value != None:\n self.lista_cont.append(cell.value)\n for cell in req2_sheet['C']:\n if cell.value != None:\n self.lista_tipo_transp.append(cell.value)\n for cell in req2_sheet['E']:\n if cell.value != None:\n self.lista_descargue.append(cell.value) \n for cell in req2_sheet['F']:\n if cell.value != None:\n self.lista_debe_enviarinfo.append(cell.value)\n for cell in req2_sheet['G']:\n if cell.value != None:\n self.lista_tiporeq.append(cell.value)\n for cell in req2_sheet['H']:\n if cell.value != None:\n self.lista_tipocont.append(cell.value)\n \n \n for cell in hist_req_sheet['A']:\n if cell.value !=None:\n self.lista_nro_req.append(cell.value)\n \n try:\n self.nro_req= int(self.lista_nro_req[-1])+1\n except:\n self.nro_req=1\n\n \n self.lista_cont.pop(0)\n self.lista_tipo_transp.pop(0)\n self.lista_descargue.pop(0)\n self.lista_debe_enviarinfo.pop(0)\n self.lista_tiporeq.pop(0)\n self.lista_tipocont.pop(0)\n \n self.fila_vacia = 2\n \n while (hist_req_sheet.cell(row = self.fila_vacia, column = 1).value != None) :\n self.fila_vacia += 1\n \n wx.Frame.__init__(self, None, wx.ID_ANY, \"Centro Logistico\", size=(930, 585),style=wx.DEFAULT_FRAME_STYLE & ~(wx.RESIZE_BORDER | wx.MAXIMIZE_BOX)) \n self.SetBackgroundColour(secondary_color)\n self.panel = NuevoReqPanel(self)\n panel_font= wx.Font(10, wx.DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,underline=False,faceName=\"Folks-Normal\")\n self.panel.SetFont(panel_font)\n self.Center()\n \n ico = wx.Icon('Cont.ico', wx.BITMAP_TYPE_ICO)\n self.SetIcon(ico)\n self.fgs= wx.GridBagSizer(0,0)\n title_font= wx.Font(25, 
wx.FONTFAMILY_DECORATIVE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,underline=False,faceName=\"Folks-Bold\")\n title_font3= wx.Font(15, wx.FONTFAMILY_DECORATIVE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,underline=False,faceName=\"Folks-Bold\")\n bold_font= wx.Font(10, wx.FONTFAMILY_DECORATIVE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,underline=False,faceName=\"Folks-Bold\")\n \n self.lbltitle2 =wx.StaticText(self.panel, label='CENTRO LOGISTICO')\n self.lblrequerimiento =wx.StaticText(self.panel, label='Requerimiento N° ' + str(self.nro_req))\n self.requerimiento_auto =(self.nro_req)\n self.lblfecha =wx.StaticText(self.panel, label='Fecha:')\n self.lblfecha_auto =wx.StaticText(self.panel, label=datetime.today().strftime('%d-%m-%Y')) #-%H:%M:%S\n self.lblarea_req =wx.StaticText(self.panel, label='Req. Por: ')\n self.lblarea_req_auto =wx.StaticText(self.panel, label=message)\n self.lblcotizacion =wx.StaticText(self.panel, label='Cotizacion N°')\n self.lbltipotransporte =wx.StaticText(self.panel, label='Tipo de Transporte')\n self.lblcontenedor =wx.StaticText(self.panel, label='Contenedor')\n self.lbltipocont=wx.StaticText(self.panel, label='Tipo')\n self.lblrequieredescargue =wx.StaticText(self.panel, label='Requiere Descargue')\n self.lblorigen =wx.StaticText(self.panel, label='Origen')\n self.lbldestino =wx.StaticText(self.panel, label='Destino')\n self.lblkm =wx.StaticText(self.panel, label='Km')\n self.lblprecio =wx.StaticText(self.panel, label='Precio')\n self.lblrecargotransporte =wx.StaticText(self.panel, label='Recargo Transporte')\n self.lblinfocliente =wx.StaticText(self.panel, label='INFORMACION CLIENTE')\n self.lblnombreresponsable =wx.StaticText(self.panel, label='Nombre Responsable')\n self.lbltelefono_resp =wx.StaticText(self.panel, label='Telefono')\n self.lbl_correo =wx.StaticText(self.panel, label='Correo')\n self.lblnombresiso =wx.StaticText(self.panel, label='Nombre SISO')\n self.lbltelefono_siso =wx.StaticText(self.panel, label='Telefono')\n self.lbldebeinfo =wx.StaticText(self.panel, label='Debe Enviarse\\nInformacion')\n self.lblhorasantes =wx.StaticText(self.panel, label='N° Horas Antes')\n self.lbltiporeq =wx.StaticText(self.panel, label='Tipo Requerimiento')\n self.lblnombrecliente =wx.StaticText(self.panel, label='Nombre Cliente')\n \n self.lbltitle2.SetFont(title_font)\n self.lblinfocliente.SetFont(title_font3)\n \n self.lblrequerimiento.SetFont(bold_font)\n self.lblfecha.SetFont(bold_font)\n self.lblfecha_auto.SetFont(bold_font)\n self.lblcotizacion.SetFont(bold_font)\n self.lbltipotransporte.SetFont(bold_font)\n self.lblcontenedor.SetFont(bold_font)\n self.lblrequieredescargue.SetFont(bold_font)\n self.lblorigen.SetFont(bold_font)\n self.lbldestino.SetFont(bold_font)\n self.lblkm.SetFont(bold_font)\n self.lblprecio.SetFont(bold_font)\n self.lblrecargotransporte.SetFont(bold_font)\n self.lblnombreresponsable.SetFont(bold_font)\n self.lbltelefono_resp.SetFont(bold_font)\n self.lbl_correo.SetFont(bold_font)\n self.lblnombresiso.SetFont(bold_font)\n self.lbltelefono_siso.SetFont(bold_font)\n self.lbldebeinfo.SetFont(bold_font)\n self.lblhorasantes.SetFont(bold_font)\n self.lbltiporeq.SetFont(bold_font)\n self.lblnombrecliente.SetFont(bold_font)\n self.lbltipocont.SetFont(bold_font)\n \n self.lblrequerimiento.SetFont(title_font3)\n self.lblfecha.SetFont(title_font3)\n self.lblfecha_auto.SetFont(title_font3)\n self.lblarea_req.SetFont(bold_font)\n self.lblarea_req_auto.SetFont(bold_font)\n \n \n self.lbltitle2.SetBackgroundColour(secondary_color)\n 
self.lblrequerimiento.SetBackgroundColour(secondary_color)\n self.lblfecha.SetBackgroundColour(secondary_color)\n self.lblfecha_auto.SetBackgroundColour(secondary_color)\n self.lblarea_req.SetBackgroundColour(secondary_color)\n self.lblarea_req_auto.SetBackgroundColour(secondary_color)\n self.lblcotizacion.SetBackgroundColour(secondary_color)\n self.lbltipotransporte.SetBackgroundColour(secondary_color)\n self.lblcontenedor.SetBackgroundColour(secondary_color)\n self.lblrequieredescargue.SetBackgroundColour(secondary_color)\n self.lblorigen.SetBackgroundColour(secondary_color)\n self.lbldestino.SetBackgroundColour(secondary_color)\n self.lblkm.SetBackgroundColour(secondary_color)\n self.lblprecio.SetBackgroundColour(secondary_color)\n self.lblrecargotransporte.SetBackgroundColour(secondary_color)\n self.lblinfocliente.SetBackgroundColour(secondary_color)\n self.lblnombreresponsable.SetBackgroundColour(secondary_color)\n self.lbltelefono_resp.SetBackgroundColour(secondary_color)\n self.lbl_correo.SetBackgroundColour(secondary_color)\n self.lblnombresiso.SetBackgroundColour(secondary_color)\n self.lbltelefono_siso.SetBackgroundColour(secondary_color)\n self.lbldebeinfo.SetBackgroundColour(secondary_color)\n self.lblhorasantes.SetBackgroundColour(secondary_color)\n self.lbltipocont.SetBackgroundColour(secondary_color)\n \n self.lbltitle2.SetForegroundColour(principal_color)\n self.lblrequerimiento.SetForegroundColour(principal_color)\n self.lblfecha.SetForegroundColour(principal_color)\n self.lblfecha_auto.SetForegroundColour(principal_color)\n self.lblarea_req.SetForegroundColour(principal_color)\n self.lblarea_req_auto.SetForegroundColour(principal_color)\n self.lblcotizacion.SetForegroundColour(principal_color)\n self.lbltipotransporte.SetForegroundColour(principal_color)\n self.lblcontenedor.SetForegroundColour(principal_color)\n self.lblrequieredescargue.SetForegroundColour(principal_color)\n self.lblorigen.SetForegroundColour(principal_color)\n self.lbldestino.SetForegroundColour(principal_color)\n self.lblkm.SetForegroundColour(principal_color)\n self.lblprecio.SetForegroundColour(principal_color)\n self.lblrecargotransporte.SetForegroundColour(principal_color)\n self.lblinfocliente.SetForegroundColour(principal_color)\n self.lblnombreresponsable.SetForegroundColour(principal_color)\n self.lbltelefono_resp.SetForegroundColour(principal_color)\n self.lbl_correo.SetForegroundColour(principal_color)\n self.lblnombresiso.SetForegroundColour(principal_color)\n self.lbltelefono_siso.SetForegroundColour(principal_color)\n self.lbldebeinfo.SetForegroundColour(principal_color)\n self.lblhorasantes.SetForegroundColour(principal_color)\n self.lbltiporeq.SetForegroundColour(principal_color)\n self.lblnombrecliente.SetForegroundColour(principal_color)\n self.lbltipocont.SetForegroundColour(principal_color)\n \n self.txtcotizacion=wx.TextCtrl(self.panel)\n self.txtorigen=wx.TextCtrl(self.panel)\n self.txtdestino=wx.TextCtrl(self.panel)\n self.txtkm=wx.TextCtrl(self.panel)\n self.txtprecio=wx.TextCtrl(self.panel)\n self.txtnombreresponsable=wx.TextCtrl(self.panel)\n self.txttelefono_resp=wx.TextCtrl(self.panel)\n self.txtcorreo=wx.TextCtrl(self.panel)\n self.txtnombresiso=wx.TextCtrl(self.panel)\n self.txttelefono_siso=wx.TextCtrl(self.panel)\n self.txthorasantes=wx.TextCtrl(self.panel)\n self.txtnombrecliente=wx.TextCtrl(self.panel)\n \n self.combotipotransporte=wx.ComboBox(self.panel,value=self.lista_tipo_transp[0], choices=self.lista_tipo_transp)\n 
self.combocontenedor=wx.ComboBox(self.panel,value=self.lista_cont[0], choices=self.lista_cont)\n self.combotipocont=wx.ComboBox(self.panel,value=self.lista_tipocont[0], choices=self.lista_tipocont)\n self.comborequieredescargue=wx.ComboBox(self.panel,value=self.lista_descargue[0], choices=self.lista_descargue)\n self.combotiporeq=wx.ComboBox(self.panel,value=self.lista_tiporeq[0], choices=self.lista_tiporeq)\n \n self.check_si_peaje = wx.CheckBox(self.panel, label= \"Si\")\n self.check_no_peaje = wx.CheckBox(self.panel, label='No')\n self.check_si_info = wx.CheckBox(self.panel, label= \"Si\")\n self.check_no_info = wx.CheckBox(self.panel, label='No')\n \n self.check_si_peaje.SetForegroundColour(principal_color)\n self.check_no_peaje.SetForegroundColour(principal_color)\n self.check_si_info.SetForegroundColour(principal_color)\n self.check_no_info.SetForegroundColour(principal_color)\n \n self.check_no_peaje.SetValue(True)\n\n btn_guardar = wx.Button(self.panel, id=wx.ID_OK, label=\"Guardar\",size=(-1,-1))\n btn_salir = wx.Button(self.panel, id=wx.ID_ANY, label=\"Salir\",size=(-1,-1))\n btn_adicionar_transp = wx.Button(self.panel, id=wx.ID_OK, label=\"Adicionar\",size=(-1,-1))\n \n order=(self.txtcotizacion,self.txtnombrecliente, self.combotiporeq,self.combotipotransporte,self.combocontenedor,self.combotipocont, self.comborequieredescargue,self.txtorigen,self.txtdestino,self.txtkm,self.txtprecio,\n self.check_si_peaje,self.check_no_peaje,self.txtnombreresponsable,self.txttelefono_resp,self.txtcorreo,self.txtnombresiso,self.txttelefono_siso,self.check_si_info,\n self.check_no_info,self.txthorasantes,btn_adicionar_transp,btn_guardar,btn_salir)\n \n for i in range(len(order) - 1):\n order[i+1].MoveAfterInTabOrder(order[i])\n \n self.fgs.Add(self.check_si_peaje, pos=(8,6),span=(1,1), flag= wx.ALL |wx.ALIGN_RIGHT, border=5)\n self.fgs.Add(self.check_no_peaje, pos=(8,7),span=(1,1), flag= wx.ALL |wx.ALIGN_LEFT, border=5)\n self.fgs.Add(self.check_si_info, pos=(15,2),span=(1,1), flag= wx.ALL |wx.ALIGN_LEFT, border=5)\n self.fgs.Add(self.check_no_info, pos=(16,2),span=(1,1), flag= wx.LEFT |wx.ALIGN_LEFT, border=5)\n \n self.fgs.Add(btn_adicionar_transp, pos=(17,6),span=(1,1), flag= wx.ALL | wx.ALIGN_CENTER, border=5)\n self.fgs.Add(btn_guardar, pos=(17,7),span=(1,1), flag= wx.ALL | wx.ALIGN_CENTER, border=5)\n self.fgs.Add(btn_salir, pos=(17,8),span=(1,1), flag= wx.ALL | wx.ALIGN_CENTER, border=5)\n \n self.fgs.Add(self.combotipotransporte,pos=(6,2),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.combocontenedor,pos=(7,2),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.combotipocont,pos=(8,2),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.comborequieredescargue,pos=(9,2),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.combotiporeq, pos=(5,2),span=(1,1), flag= wx.ALL, border=5)\n\n self.fgs.Add(self.txtcotizacion, pos=(4,2),span=(1,1), flag= wx.ALL , border=5)\n self.fgs.Add(self.txtnombrecliente, pos=(4,5),span=(1,2), flag= wx.ALL| wx.EXPAND, border=5)\n self.fgs.Add(self.txtorigen, pos=(6,4),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txtdestino, pos=(6,5),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txtkm, pos=(6,6),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txtprecio, pos=(8,5),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txtnombreresponsable, pos=(12,2),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txttelefono_resp, pos=(12,5),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txtcorreo, 
pos=(12,7),span=(1,2), flag= wx.ALL| wx.EXPAND, border=5)\n self.fgs.Add(self.txtnombresiso, pos=(14,2),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txttelefono_siso, pos=(14,5),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txthorasantes, pos=(17,2),span=(1,1), flag= wx.ALL, border=5)\n\n self.fgs.Add(self.lblnombrecliente , pos=(4,4),span=(1,1), flag= wx.ALL | wx.ALIGN_CENTER, border=5)\n self.fgs.Add(self.lbltitle2 , pos=(1,1),span=(1,8), flag= wx.ALL | wx.ALIGN_CENTER, border=5)\n self.fgs.Add(self.lblrequerimiento , pos=(2,1),span=(1,2), flag= wx.ALL|wx.ALIGN_BOTTOM, border=5)\n self.fgs.Add(self.lblfecha , pos=(2,7),span=(1,1), flag= wx.ALL | wx.ALIGN_RIGHT |wx.ALIGN_BOTTOM, border=0)\n self.fgs.Add(self.lblfecha_auto , pos=(2,8),span=(1,1), flag= wx.LEFT|wx.ALIGN_BOTTOM, border=5)\n self.fgs.Add(self.lblarea_req , pos=(3,7),span=(1,1), flag= wx.ALL|wx.ALIGN_TOP | wx.ALIGN_RIGHT, border=0)\n self.fgs.Add(self.lblarea_req_auto , pos=(3,8),span=(1,1), flag= wx.LEFT |wx.ALIGN_TOP, border=5)\n self.fgs.Add(self.lblcotizacion , pos=(4,1),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lbltiporeq , pos=(5,1),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lbltipotransporte , pos=(6,1),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lblcontenedor , pos=(7,1),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lbltipocont , pos=(8,1),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lblrequieredescargue, pos=(9,1),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lblorigen , pos=(5,4),span=(1,1), flag= wx.ALL | wx.ALIGN_CENTER, border=0)\n self.fgs.Add(self.lbldestino , pos=(5,5),span=(1,1), flag= wx.ALL| wx.ALIGN_CENTER, border=0)\n self.fgs.Add(self.lblkm , pos=(5,6),span=(1,1), flag= wx.ALL| wx.ALIGN_CENTER, border=0)\n self.fgs.Add(self.lblprecio , pos=(7,5),span=(1,1), flag= wx.ALL |wx.ALIGN_BOTTOM | wx.ALIGN_CENTER_HORIZONTAL, border=0)\n self.fgs.Add(self.lblrecargotransporte , pos=(7,6),span=(1,2), flag= wx.LEFT |wx.ALIGN_BOTTOM | wx.ALIGN_CENTER_HORIZONTAL, border=11)\n self.fgs.Add(self.lblinfocliente , pos=(10,1),span=(1,8), flag= wx.ALL| wx.ALIGN_CENTER, border=5)\n self.fgs.Add(self.lblnombreresponsable , pos=(12,1),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lbltelefono_resp , pos=(12,4),span=(1,1), flag= wx.ALL | wx.ALIGN_RIGHT, border=5)\n self.fgs.Add(self.lbl_correo , pos=(12,6),span=(1,1), flag= wx.ALL| wx.ALIGN_RIGHT, border=5)\n self.fgs.Add(self.lblnombresiso , pos=(14,1),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lbltelefono_siso , pos=(14,4),span=(1,1), flag= wx.ALL |wx.ALIGN_RIGHT, border=5)\n self.fgs.Add(self.lbldebeinfo , pos=(15,1),span=(2,1), flag= wx.ALL| wx.ALIGN_CENTER_VERTICAL, border=5)\n self.fgs.Add(self.lblhorasantes , pos=(17,1),span=(1,1), flag= wx.ALL, border=5)\n \n \n self.check_si_peaje.Bind(wx.EVT_CHECKBOX, self.onCheck_si_peaje)\n self.check_no_peaje.Bind(wx.EVT_CHECKBOX, self.onCheck_no_peaje)\n self.check_si_info.Bind(wx.EVT_CHECKBOX, self.onCheck_si_info)\n self.check_no_info.Bind(wx.EVT_CHECKBOX, self.onCheck_no_info)\n \n btn_guardar.Bind(wx.EVT_BUTTON, self.guardar_req)\n btn_salir.Bind(wx.EVT_BUTTON, self.salir)\n btn_adicionar_transp.Bind(wx.EVT_BUTTON, self.adicionar_transp)\n \n mainSizer= wx.BoxSizer(wx.VERTICAL)\n mainSizer.Add(self.fgs,0, flag=wx.ALIGN_LEFT)\n self.panel.SetSizerAndFit(mainSizer) \n \n self.txthorasantes.Hide()\n self.lblhorasantes.Hide()\n \n def onCheck_si_peaje(self,event):\n if self.check_no_peaje.IsChecked():\n 
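# the paired Si/No checkboxes emulate radio buttons: checking one clears the other\n            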
self.check_no_peaje.SetValue(False)\n \n def onCheck_no_peaje(self,event):\n if self.check_si_peaje.IsChecked():\n self.check_si_peaje.SetValue(False)\n \n def onCheck_si_info(self,event):\n self.txthorasantes.Show()\n self.lblhorasantes.Show()\n \n if self.check_no_info.IsChecked():\n self.check_no_info.SetValue(False)\n \n def onCheck_no_info(self,event):\n self.txthorasantes.Hide()\n self.lblhorasantes.Hide()\n \n if self.check_si_info.IsChecked():\n self.check_si_info.SetValue(False)\n \n def precio_final(self,hist_req_sheet):\n wb_listas=openpyxl.load_workbook(path_config)\n config_sheet=wb_listas['Config']\n km=float(self.txtkm.GetValue())\n precio=int(self.txtprecio.GetValue())\n \n if self.check_si_peaje.IsChecked():\n limites_inf=[config_sheet.cell(row=11,column=1).value,config_sheet.cell(row=12,column=1).value,\n config_sheet.cell(row=13,column=1).value]\n \n limites_sup=[config_sheet.cell(row=11,column=3).value,config_sheet.cell(row=12,column=3).value,\n config_sheet.cell(row=13,column=3).value]\n \n factores=[config_sheet.cell(row=11,column=4).value,config_sheet.cell(row=12,column=4).value,\n config_sheet.cell(row=13,column=4).value]\n \n for i in range(len(factores)):\n if limites_inf[i] < km and km <=limites_sup[i]:\n precio_con_recargo= precio*factores[i]\n break\n \n print(precio_con_recargo)\n hist_req_sheet.cell(row=self.fila_vacia, column=col_precio_recargo).value=precio_con_recargo\n \n else:\n \n hist_req_sheet.cell(row=self.fila_vacia, column=col_precio_recargo).value=precio\n \n\n def guardar_req(self,event):\n \n \n if self.check_si_info.IsChecked():\n diccionario_campos_oblig_texto={self.txtcotizacion:'Cotizacion N°', self.txtnombrecliente:'Nombre Cliente', self.txtorigen:'Origen', self.txtdestino:'Destino', self.txtnombreresponsable:'Nombre Responsable', self.txttelefono_resp:'Telefono Resposnable', self.txthorasantes:'N° Horas Antes'}\n else:\n diccionario_campos_oblig_texto={self.txtcotizacion:'Cotizacion N°', self.txtnombrecliente:'Nombre Cliente',self.txtorigen:'Origen', self.txtdestino:'Destino', self.txtnombreresponsable:'Nombre Responsable', self.txttelefono_resp:'Telefono Resposnable'}\n \n diccionario_campos_oblig_numero={self.txtprecio:'Precio', self.txtkm: 'Km'}\n diccionario_campos_oblig_combos={self.combotipotransporte:'Tipo de Transporte', self.combotiporeq:'Tipo Requerimiento', self.combocontenedor:'Contenedor', self.combotipocont:'Tipo de Contenedor', self.comborequieredescargue:'Requiere Desacargue'} \n \n if self.validar_campos_vacios_texto(diccionario_campos_oblig_texto)==False or self.validar_campos_vacios_numero(diccionario_campos_oblig_numero)==False or self.validar_seleccion_combos(diccionario_campos_oblig_combos)==False:\n return\n \n wb_req=openpyxl.load_workbook(path_db)\n wb_listas=openpyxl.load_workbook(path_config)\n \n \n hist_req_sheet=wb_req['Requerimientos']\n req2_sheet=wb_listas['Requerimientos-12']\n config_sheet=wb_listas['Config']\n\n self.fila_vacia = 1\n \n while (hist_req_sheet.cell(row = self.fila_vacia, column = 1).value != None) :\n self.fila_vacia += 1\n \n for cell in hist_req_sheet['A']:\n if cell.value !=None:\n self.lista_nro_req.append(cell.value)\n try:\n self.nro_req=int(self.lista_nro_req[-1])+1\n except:\n self.nro_req=1\n\n requerimiento_auto=self.nro_req\n fecha_auto=self.lblfecha_auto.GetLabel()\n cotizacion=self.txtcotizacion.GetValue()\n nombre_cliente=self.txtnombrecliente.GetValue()\n tiporeq=self.combotiporeq.GetValue()\n tipotransporte=self.combotipotransporte.GetValue()\n 
contenedor=self.combocontenedor.GetValue()\n tipocont=self.combotipocont.GetValue()\n requieredescargue=self.comborequieredescargue.GetValue()\n origen=self.txtorigen.GetValue()\n destino=self.txtdestino.GetValue()\n km=self.txtkm.GetValue()\n precio=self.txtprecio.GetValue()\n nombreresponsable=self.txtnombreresponsable.GetValue()\n telefono_resp=self.txttelefono_resp.GetValue()\n correo=self.txtcorreo.GetValue()\n nombresiso=self.txtnombresiso.GetValue()\n telefono_siso=self.txttelefono_siso.GetValue()\n horasantes=self.txthorasantes.GetValue()\n \n if self.check_si_peaje.IsChecked():\n check_peaje=\"Si\"\n else:\n check_peaje=\"No\"\n \n if self.check_si_info.IsChecked():\n debeinfo=\"Si\"\n else:\n debeinfo=\"No\"\n \n self.dic_asosiacion={}\n self.lista_asociacion=[]\n self.lista_tipo_transp2=[]\n \n for cell in req2_sheet['D']:\n if cell != None:\n self.lista_asociacion.append(cell.value)\n \n for cell in req2_sheet['C']:\n if cell != None:\n self.lista_tipo_transp2.append(cell.value)\n \n \n for i in range((len(self.lista_tipo_transp2))):\n self.dic_asosiacion[self.lista_tipo_transp2[i]]=self.lista_asociacion[i]\n \n hist_req_sheet.cell(row=self.fila_vacia, column=col_requerimiento_auto).value=requerimiento_auto\n hist_req_sheet.cell(row=self.fila_vacia, column=col_area_req).value=self.area_selec\n hist_req_sheet.cell(row=self.fila_vacia, column=col_area).value=self.dic_asosiacion[tipotransporte]\n hist_req_sheet.cell(row=self.fila_vacia, column=col_fecha_auto).value=fecha_auto\n hist_req_sheet.cell(row=self.fila_vacia, column=col_cotizacion).value=cotizacion\n hist_req_sheet.cell(row=self.fila_vacia, column=col_nombrecliente).value=nombre_cliente\n hist_req_sheet.cell(row=self.fila_vacia, column=col_tipo_req).value=tiporeq\n hist_req_sheet.cell(row=self.fila_vacia, column=col_tipotransporte).value=tipotransporte\n hist_req_sheet.cell(row=self.fila_vacia, column=col_contenedor).value=contenedor\n hist_req_sheet.cell(row=self.fila_vacia, column=col_tipocont).value=tipocont\n hist_req_sheet.cell(row=self.fila_vacia, column=col_requieredescargue).value=requieredescargue\n hist_req_sheet.cell(row=self.fila_vacia, column=col_origen).value=origen\n hist_req_sheet.cell(row=self.fila_vacia, column=col_destino).value=destino\n hist_req_sheet.cell(row=self.fila_vacia, column=col_km).value=km\n hist_req_sheet.cell(row=self.fila_vacia, column=col_precio).value=precio\n hist_req_sheet.cell(row=self.fila_vacia, column=col_nombreresponsable).value=nombreresponsable\n hist_req_sheet.cell(row=self.fila_vacia, column=col_telefono_resp).value=telefono_resp\n hist_req_sheet.cell(row=self.fila_vacia, column=col_correo).value=correo\n hist_req_sheet.cell(row=self.fila_vacia, column=col_nombresiso).value=nombresiso\n hist_req_sheet.cell(row=self.fila_vacia, column=col_telefono_siso).value=telefono_siso\n hist_req_sheet.cell(row=self.fila_vacia, column=col_debeinfo).value=debeinfo\n hist_req_sheet.cell(row=self.fila_vacia, column=col_horasantes).value=horasantes\n hist_req_sheet.cell(row=self.fila_vacia, column=col_recargotransporte).value=check_peaje\n self.precio_final(hist_req_sheet)\n siguiente_consec= self.leer_consecutivo_tipo_reg(tiporeq,hist_req_sheet)\n \n \n if tipotransporte=='Propio':\n nombreconduc=config_sheet.cell(row=16,column=2).value\n cedula=config_sheet.cell(row=17,column=2).value\n telefonoconduc=config_sheet.cell(row=18,column=2).value\n placa=config_sheet.cell(row=19,column=2).value\n \n hist_req_sheet.cell(row=self.fila_vacia, column=col_nombreconduc).value=nombreconduc\n 
hist_req_sheet.cell(row=self.fila_vacia, column=col_cedula).value=cedula\n hist_req_sheet.cell(row=self.fila_vacia, column=col_telefonoconduc).value=telefonoconduc\n hist_req_sheet.cell(row=self.fila_vacia, column=col_placa).value=placa\n \n \n \n try:\n wb_req.save(path_db)\n if tiporeq=='Venta' or tiporeq=='Alquiler' or tiporeq=='Modificacion':\n with open(path_vta_alq_mod_consecutivos, 'a') as f:\n f.write('\\n'+str(siguiente_consec))\n elif tiporeq=='Movimiento' or tiporeq=='Devolucion':\n with open(path_mov_dev_consecutivos, 'a') as f:\n f.write('\\n'+str(siguiente_consec))\n \n self.txtcotizacion.Value=''\n self.combotipotransporte.Value=self.lista_tipo_transp[0]\n self.combocontenedor.Value=self.lista_cont[0]\n self.combotipocont.Value=self.lista_tipocont[0]\n self.comborequieredescargue.Value=self.lista_descargue[0]\n self.combotiporeq.Value=self.lista_tiporeq[0]\n self.txtorigen.Value=''\n self.txtnombrecliente.Value=''\n self.txtdestino.Value=''\n self.txtkm.Value=''\n self.txtprecio.Value=''\n self.txtnombreresponsable.Value=''\n self.txttelefono_resp.Value=''\n self.txtcorreo.Value=''\n self.txtnombresiso.Value=''\n self.txttelefono_siso.Value=''\n self.txthorasantes.Value=''\n self.check_no_peaje.SetValue(True)\n self.check_si_peaje.SetValue(False)\n self.check_no_info.SetValue(False)\n self.check_si_info.SetValue(False)\n self.Destroy()\n except Exception as e:\n print(e)\n error_msgbox=wx.MessageDialog(None,'Error al guardar el registro en la BD. \\nVerifique el el archivo de excel este cerrado y en la ruta correcta.','ERROR',wx.ICON_ERROR)\n error_msgbox.ShowModal()\n \n try:\n sheet_config=wb_listas['Config']\n area_correo=self.dic_asosiacion[tipotransporte]\n \n if area_correo=='Operaciones':\n receiver=sheet_config.cell(row=6, column=2).value\n correo_operaciones=\"\"\n elif area_correo=='Administracion':\n receiver=sheet_config.cell(row=7,column=2).value\n correo_operaciones=sheet_config.cell(row=6, column=2).value\n else:\n receiver=sheet_config.cell(row=8,column=2).value\n correo_operaciones=sheet_config.cell(row=6, column=2).value\n \n dic_info_cliente={}\n dic_info_cliente['nombreresponsable']=nombreresponsable\n dic_info_cliente['telefono_resp']=telefono_resp\n dic_info_cliente['correo']=correo\n dic_info_cliente['nombresiso']=nombresiso\n dic_info_cliente['telefono_siso']=telefono_siso\n dic_info_cliente['horasantes']=horasantes\n \n self.enviar_email(receiver, self.nro_req, area_correo, correo_operaciones,dic_info_cliente )\n \n except Exception as e:\n error_msgbox=wx.MessageDialog(None,'Hubo un problema al enviar el correo, valide su conexion a Internet','ERROR',wx.ICON_ERROR)\n error_msgbox.ShowModal()\n \n \n \n \n def enviar_email(self, receiver, nro_req, area_encargada, correo_operaciones,dic_info_cliente):\n \n EMAIL_ADDRESS='requerimientologisticocontant@gmail.com'\n EMAIL_PASSWORD='pewljcgvqnrjhegz'\n \n msg = EmailMessage()\n msg['Subject'] = 'Nuevo Requerimiento Logistico No. ' + str(nro_req) + \" // \" + str(datetime.today().strftime('%d-%m-%Y'))\n msg['From'] = EMAIL_ADDRESS\n msg['To'] = receiver\n \n if area_encargada != 'Operaciones':\n msg['CC']= correo_operaciones\n \n \n initial_html=\"\"\"\\\n \n \n \n \n \n \n

<html>\n        <body>\n            <h2>Nuevo Requerimiento</h2>\n            <p>Usted Tiene un Nuevo Requerimiento No : \"\"\"+str(nro_req)+\"\"\"</p>\n            <p>Area Encargada : \"\"\"+area_encargada+\"\"\"</p>\n
            <table>\n                <tr><th colspan=\"2\">Info Cliente</th></tr>\n                <tr><td>Nombre Responsable</td><td>\"\"\"+dic_info_cliente['nombreresponsable']+\"\"\"</td></tr>\n
                <tr><td>Telefono Responsable</td><td>\"\"\"+dic_info_cliente['telefono_resp']+\"\"\"</td></tr>\n                <tr><td>Correo Responsable</td><td>\"\"\"+dic_info_cliente['correo']+\"\"\"</td></tr>\n
                <tr><td>Nombre SISO</td><td>\"\"\"+dic_info_cliente['nombresiso']+\"\"\"</td></tr>\n                <tr><td>Telefono SISO</td><td>\"\"\"+dic_info_cliente['telefono_siso']+\"\"\"</td></tr>\n            </table>\n
            <p>Favor diríjase al centro logistico para darle tramite al requerimiento</p>\n            <p>Contenedores de Antioquia</p>\n        </body>\n    </html>
\n \n \"\"\"\n \n \n msg.add_alternative(initial_html, subtype='html')\n \n with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:\n smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)\n smtp.send_message(msg)\n\n\n def salir(self,event):\n salir_msgbox=wx.MessageBox('¿Esta seguro que desea salir sin guardar?','Salir sin Guardar',wx.YES_NO| wx.ICON_WARNING)\n \n if salir_msgbox == wx.YES:\n self.Destroy()\n else:\n pass\n \n \n def adicionar_transp(self,event):\n \n if self.check_si_info.IsChecked():\n diccionario_campos_oblig_texto={self.txtcotizacion:'Cotizacion N°', self.txtnombrecliente:'Nombre Cliente',self.txtorigen:'Origen', self.txtdestino:'Destino', self.txtnombreresponsable:'Nombre Responsable', self.txttelefono_resp:'Telefono Resposnable', self.txthorasantes:'N° Horas Antes'}\n else:\n diccionario_campos_oblig_texto={self.txtcotizacion:'Cotizacion N°', self.txtnombrecliente:'Nombre Cliente',self.txtorigen:'Origen', self.txtdestino:'Destino', self.txtnombreresponsable:'Nombre Responsable', self.txttelefono_resp:'Telefono Resposnable'}\n \n diccionario_campos_oblig_numero={self.txtprecio:'Precio', self.txtkm: 'Km'}\n diccionario_campos_oblig_combos={self.combotipotransporte:'Tipo de Transporte', self.combotiporeq:'Tipo Requerimiento', self.combocontenedor:'Contenedor', self.combotipocont:'Tipo de Contenedor', self.comborequieredescargue:'Requiere Desacargue'} \n \n if self.validar_campos_vacios_texto(diccionario_campos_oblig_texto)==False or self.validar_campos_vacios_numero(diccionario_campos_oblig_numero)==False or self.validar_seleccion_combos(diccionario_campos_oblig_combos)==False:\n return\n \n wb_req=openpyxl.load_workbook(path_db)\n wb_listas=openpyxl.load_workbook(path_config)\n \n hist_req_sheet=wb_req['Requerimientos']\n req2_sheet=wb_listas['Requerimientos-12']\n self.fila_vacia = 1\n \n while (hist_req_sheet.cell(row = self.fila_vacia, column = 1).value != None) :\n self.fila_vacia += 1\n \n for cell in hist_req_sheet['A']:\n if cell.value !=None:\n self.lista_nro_req.append(cell.value)\n try:\n self.nro_req=int(self.lista_nro_req[-1])+1\n except:\n self.nro_req=1\n \n requerimiento_auto=self.nro_req\n fecha_auto=self.lblfecha_auto.GetLabel()\n cotizacion=self.txtcotizacion.GetValue()\n nombre_cliente=self.txtnombrecliente.GetValue()\n tiporeq=self.combotiporeq.GetValue()\n tipotransporte=self.combotipotransporte.GetValue()\n contenedor=self.combocontenedor.GetValue()\n tipocont=self.combotipocont.GetValue()\n requieredescargue=self.comborequieredescargue.GetValue()\n origen=self.txtorigen.GetValue()\n destino=self.txtdestino.GetValue()\n km=self.txtkm.GetValue()\n precio=self.txtprecio.GetValue()\n nombreresponsable=self.txtnombreresponsable.GetValue()\n telefono_resp=self.txttelefono_resp.GetValue()\n correo=self.txtcorreo.GetValue()\n nombresiso=self.txtnombresiso.GetValue()\n telefono_siso=self.txttelefono_siso.GetValue()\n \n horasantes=self.txthorasantes.GetValue()\n \n if self.check_si_peaje.IsChecked():\n check_peaje=\"Si\"\n else:\n check_peaje=\"No\"\n \n if self.check_si_info.IsChecked():\n debeinfo=\"Si\"\n else:\n debeinfo=\"No\"\n \n self.dic_asosiacion={}\n self.lista_asociacion=[]\n self.lista_tipo_transp2=[]\n \n for cell in req2_sheet['D']:\n if cell != None:\n self.lista_asociacion.append(cell.value)\n \n for cell in req2_sheet['C']:\n if cell != None:\n self.lista_tipo_transp2.append(cell.value)\n \n \n for i in range((len(self.lista_tipo_transp2))):\n self.dic_asosiacion[self.lista_tipo_transp2[i]]=self.lista_asociacion[i]\n \n 
hist_req_sheet.cell(row=self.fila_vacia, column=col_requerimiento_auto).value=requerimiento_auto\n hist_req_sheet.cell(row=self.fila_vacia, column=col_area_req).value=self.area_selec\n hist_req_sheet.cell(row=self.fila_vacia, column=col_area).value=self.dic_asosiacion[tipotransporte]\n hist_req_sheet.cell(row=self.fila_vacia, column=col_fecha_auto).value=fecha_auto\n hist_req_sheet.cell(row=self.fila_vacia, column=col_cotizacion).value=cotizacion\n hist_req_sheet.cell(row=self.fila_vacia, column=col_nombrecliente).value=nombre_cliente\n hist_req_sheet.cell(row=self.fila_vacia, column=col_tipo_req).value=tiporeq\n hist_req_sheet.cell(row=self.fila_vacia, column=col_tipotransporte).value=tipotransporte\n hist_req_sheet.cell(row=self.fila_vacia, column=col_contenedor).value=contenedor\n hist_req_sheet.cell(row=self.fila_vacia, column=col_tipocont).value=tipocont\n hist_req_sheet.cell(row=self.fila_vacia, column=col_requieredescargue).value=requieredescargue\n hist_req_sheet.cell(row=self.fila_vacia, column=col_origen).value=origen\n hist_req_sheet.cell(row=self.fila_vacia, column=col_destino).value=destino\n hist_req_sheet.cell(row=self.fila_vacia, column=col_km).value=km\n hist_req_sheet.cell(row=self.fila_vacia, column=col_precio).value=precio\n hist_req_sheet.cell(row=self.fila_vacia, column=col_nombreresponsable).value=nombreresponsable\n hist_req_sheet.cell(row=self.fila_vacia, column=col_telefono_resp).value=telefono_resp\n hist_req_sheet.cell(row=self.fila_vacia, column=col_correo).value=correo\n hist_req_sheet.cell(row=self.fila_vacia, column=col_nombresiso).value=nombresiso\n hist_req_sheet.cell(row=self.fila_vacia, column=col_telefono_siso).value=telefono_siso\n hist_req_sheet.cell(row=self.fila_vacia, column=col_debeinfo).value=debeinfo\n hist_req_sheet.cell(row=self.fila_vacia, column=col_horasantes).value=horasantes\n hist_req_sheet.cell(row=self.fila_vacia, column=col_recargotransporte).value=check_peaje\n self.precio_final(hist_req_sheet)\n siguiente_consec= self.leer_consecutivo_tipo_reg(tiporeq,hist_req_sheet)\n \n self.combotipotransporte.Value=self.lista_tipo_transp[0]\n self.combocontenedor.Value=self.lista_cont[0]\n self.combotipocont.Value=self.lista_tipocont[0]\n self.txtprecio.Value=''\n self.comborequieredescargue.Value=self.lista_tipo_transp[0]\n \n try:\n wb_req.save(path_db)\n if tiporeq=='Venta' or tiporeq=='Alquiler' or tiporeq=='Modificacion':\n with open(path_vta_alq_mod_consecutivos, 'a') as f:\n f.write('\\n'+str(siguiente_consec))\n elif tiporeq=='Movimiento' or tiporeq=='Devolucion':\n with open(path_mov_dev_consecutivos, 'a') as f:\n f.write('\\n'+str(siguiente_consec))\n \n self.lista_nro_req=[]\n for cell in hist_req_sheet['A']:\n if cell.value !=None:\n self.lista_nro_req.append(cell.value)\n self.nro_req= int(self.lista_nro_req[-1])+1 \n self.lblrequerimiento.SetLabel(label='Requerimiento N° ' + str(self.nro_req))\n \n except Exception as e:\n error_msgbox=wx.MessageDialog(None,'Error al guardar el registro en la BD. 
\\nVerifique el el archivo de excel este cerrado y en la ruta correcta.','ERROR',wx.ICON_ERROR)\n error_msgbox.ShowModal()\n try:\n\n sheet_config=wb_listas['Config']\n area_correo=self.dic_asosiacion[tipotransporte]\n \n if area_correo=='Operaciones':\n receiver=sheet_config.cell(row=6, column=2).value\n correo_operaciones=\"\"\n elif area_correo=='Administracion':\n receiver=sheet_config.cell(row=7,column=2).value\n correo_operaciones=sheet_config.cell(row=6, column=2).value\n else:\n receiver=sheet_config.cell(row=8,column=2).value\n correo_operaciones=sheet_config.cell(row=6, column=2).value\n\n dic_info_cliente={}\n dic_info_cliente['nombreresponsable']=nombreresponsable\n dic_info_cliente['telefono_resp']=telefono_resp\n dic_info_cliente['correo']=correo\n dic_info_cliente['nombresiso']=nombresiso\n dic_info_cliente['telefono_siso']=telefono_siso\n dic_info_cliente['horasantes']=horasantes\n \n self.enviar_email(receiver, self.nro_req, area_correo, correo_operaciones,dic_info_cliente )\n \n except Exception as e:\n error_msgbox=wx.MessageDialog(None,'Hubo un problema al enviar el correo, valide su conexion a Internet','ERROR',wx.ICON_ERROR)\n error_msgbox.ShowModal() \n \n def leer_consecutivo_tipo_reg(self, tiporeq, hist_req_sheet):\n \n if tiporeq=='Venta' or tiporeq=='Alquiler' or tiporeq=='Modificacion':\n with open(path_vta_alq_mod_consecutivos, 'r') as f:\n lines=f.readlines()\n last=int(lines[-1])\n siguiente_consec=last+1\n hist_req_sheet.cell(row=self.fila_vacia, column=col_consecutivo_tipo_req).value=siguiente_consec\n return siguiente_consec\n \n elif tiporeq=='Movimiento' or tiporeq=='Devolucion':\n with open(path_mov_dev_consecutivos, 'r') as f:\n lines=f.readlines()\n last=int(lines[-1])\n siguiente_consec=last+1\n hist_req_sheet.cell(row=self.fila_vacia, column=col_consecutivo_tipo_req).value=siguiente_consec\n return siguiente_consec\n \n \n \n def validar_campos_vacios_texto(self,diccionario_campos_oblig):\n for campo in diccionario_campos_oblig:\n if len(campo.GetValue().strip()) == 0:\n error_msgbox=wx.MessageDialog(None,'Falta diligenciar el campo: ' + diccionario_campos_oblig[campo],'ERROR',wx.ICON_ERROR)\n error_msgbox.ShowModal() \n return False\n return True\n \n def validar_campos_vacios_numero(self, diccionario_campos_oblig):\n \n for campo in diccionario_campos_oblig:\n if campo.GetValue().isnumeric() == False:\n error_msgbox=wx.MessageDialog(None,'El campo: ' + diccionario_campos_oblig[campo] +' Solo debe contener caracteres numericos','ERROR',wx.ICON_ERROR)\n error_msgbox.ShowModal() \n return False\n return True\n \n def validar_seleccion_combos(self,diccionario_campos_oblig):\n \n for campo in diccionario_campos_oblig:\n if campo.GetSelection()== 0 or campo.GetSelection()== -1:\n error_msgbox=wx.MessageDialog(None,'Seleccione una opcion en el campo ' + diccionario_campos_oblig[campo],'ERROR',wx.ICON_ERROR)\n error_msgbox.ShowModal() \n return False\n return True\n\nclass NuevoReqPanel(wx.Panel):\n\n def __init__(self,parent):\n # create the panel\n wx.Panel.__init__(self, parent=parent)\n try:\n\n image_file = 'logo35.png'\n bmp1 = wx.Image(\n image_file, \n wx.BITMAP_TYPE_ANY).ConvertToBitmap()\n # image's upper left corner anchors at panel \n # coordinates (0, 0)\n self.bitmap2 = wx.StaticBitmap(\n self, -1, bmp1, (5, 0))\n # show some image details\n #str1 = \"%s %dx%d\" % (image_file, bmp1.GetWidth(),\n #bmp1.GetHeight()) \n #parent.SetTitle(str1)\n except IOError:\n print (\"Image file %s not found\")\n raise SystemExit 
\n\n##################################################### ^ NUEVO REQUERIMIENTO ^ #############################################################################################################################\n\n##################################################### LOGISTICA #############################################################################################################################\nclass ww_logistica21(wx.Frame):\n \n def __init__(self,parent):\n \n wb_listas=openpyxl.load_workbook(path_config)\n wb_req=openpyxl.load_workbook(path_db)\n\n wx.Frame.__init__(self, None, wx.ID_ANY, \"Centro Logistico\", size=(270, 250),style=wx.DEFAULT_FRAME_STYLE & ~(wx.RESIZE_BORDER | wx.MAXIMIZE_BOX)) \n self.SetBackgroundColour(secondary_color)\n self.Center()\n try:\n \n #image_file = 'CINCO CONSULTORES.jpg'\n #bmp1 = wx.Image(\n #image_file, \n #wx.BITMAP_TYPE_ANY).ConvertToBitmap()\n \n #self.panel = wx.StaticBitmap(\n #self, -1, bmp1, (0, 0)\n self.panel=wx.Panel(self)\n panel_font= wx.Font(10, wx.DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,underline=False,faceName=\"Folks-Normal\")\n self.panel.SetBackgroundColour(secondary_color)\n\n except IOError:\n print (\"Image file %s not found\" )\n raise SystemExit\n \n ico = wx.Icon('Cont.ico', wx.BITMAP_TYPE_ICO)\n self.SetIcon(ico)\n self.fgs= wx.GridBagSizer(0,0)\n \n title_font= wx.Font(11, wx.FONTFAMILY_DECORATIVE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,underline=False,faceName=\"Folks-Normal\")\n \n self.lbltitle =wx.StaticText(self.panel, label='Ingrese Numero de Requerimiento\\n a Gestionar:')\n self.lbltitle.SetFont(title_font)\n self.lbltitle.SetBackgroundColour(secondary_color)\n self.lbltitle.SetForegroundColour(principal_color)\n self.fgs.Add(self.lbltitle,pos=(2,1),span=(1,3), flag=wx.ALL | wx.ALIGN_CENTER, border=5)\n\n self.txtreq = wx.TextCtrl(self.panel)\n self.fgs.Add(self.txtreq , pos=(4,1),span=(1,3), flag= wx.ALL| wx.ALIGN_CENTER, border=5)\n \n btn_aceptar = wx.Button(self.panel, id=wx.ID_OK, label=\"Aceptar\",size=(-1,-1))\n self.fgs.Add(btn_aceptar, pos=(6,1),span=(1,3), flag= wx.ALL | wx.ALIGN_CENTER, border=0)\n btn_aceptar.Bind(wx.EVT_BUTTON, self.open_logistica22)\n\n mainSizer= wx.BoxSizer(wx.VERTICAL)\n mainSizer.Add(self.fgs,0, flag=wx.ALIGN_LEFT)\n self.panel.SetSizerAndFit(mainSizer)\n \n #-------------Button Functions-----------------# \n def open_logistica22(self, event):\n \n wb_req=openpyxl.load_workbook(path_db)\n\n\n \n hist_req_sheet=wb_req['Requerimientos']\n self.lista_nro_req=[]\n \n for cell in hist_req_sheet['A']:\n if cell.value !=None:\n self.lista_nro_req.append(cell.value)\n global req_selec\n try:\n req_selec=int(self.txtreq.GetValue())\n except:\n error_msgbox=wx.MessageDialog(None,'Numero de Requerimiento No Encontrado','ERROR',wx.ICON_ERROR)\n error_msgbox.ShowModal()\n return\n \n if req_selec in self.lista_nro_req:\n ww_logistica22(parent=self.panel).Show() \n self.Destroy()\n else:\n error_msgbox=wx.MessageDialog(None,'Numero de Requerimiento No Encontrado','ERROR',wx.ICON_ERROR)\n error_msgbox.ShowModal() \n \nclass ww_logistica22(wx.Frame):\n def __init__(self,parent): \n \n wb_listas=openpyxl.load_workbook(path_config)\n \n \n req2_sheet=wb_listas['Requerimientos-12']\n wb_req=openpyxl.load_workbook(path_db)\n self.hist_req_sheet=wb_req['Requerimientos']\n \n self.lista_descargue=[]\n for cell in req2_sheet['E']:\n if cell.value != None:\n self.lista_descargue.append(cell.value)\n\n \n global req_selec\n \n self.lista_requerimientos=[]\n \n for cell in 
self.hist_req_sheet['A']:\n if cell.value != None:\n self.lista_requerimientos.append(cell.value)\n\n self.nro_fila_req=int(self.lista_requerimientos.index(req_selec))+1\n \n #make a list thtat contains every data in the row\n self.lista_valores_fila=[]\n for cell in self.hist_req_sheet[self.nro_fila_req]:\n self.lista_valores_fila.append(cell.value)\n \n #----------Front------------#\n wx.Frame.__init__(self, None, wx.ID_ANY, \"Centro Logistico\", size=(1020, 685),style=wx.DEFAULT_FRAME_STYLE & ~(wx.RESIZE_BORDER | wx.MAXIMIZE_BOX))\n self.panel = LogisticaPanel(self)\n panel_font= wx.Font(10, wx.DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,underline=False,faceName=\"Folks-Normal\")\n self.panel.SetFont(panel_font)\n self.SetBackgroundColour(secondary_color)\n self.Center()\n \n ico = wx.Icon('Cont.ico', wx.BITMAP_TYPE_ICO)\n self.SetIcon(ico)\n self.fgs= wx.GridBagSizer(0,0)\n \n title_font= wx.Font(25, wx.FONTFAMILY_DECORATIVE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,underline=False,faceName=\"Folks-Bold\")\n title_font3= wx.Font(15, wx.FONTFAMILY_DECORATIVE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,underline=False,faceName=\"Folks-Bold\")\n bold_font= wx.Font(10, wx.FONTFAMILY_DECORATIVE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,underline=False,faceName=\"Folks-Bold\")\n \n \n self.lbltitle=wx.StaticText(self.panel, label='LOGISTICA')\n self.lblrequerimiento=wx.StaticText(self.panel, label='Requerimiento N° ' + str(self.lista_valores_fila[col_requerimiento_auto-1]))\n self.lblconsecutivo_tiporeq=wx.StaticText(self.panel, label='Remision N° ' + str(self.lista_valores_fila[col_consecutivo_tipo_req-1]))\n self.lblfecha=wx.StaticText(self.panel, label='Fecha '+ self.lista_valores_fila[col_fecha_auto-1])\n self.lblareaencargada=wx.StaticText(self.panel, label='Area Encargada: ' + self.lista_valores_fila[col_area-1])\n self.lblcotizacion=wx.StaticText(self.panel, label='Cotizacion N°')\n self.lbltiporeq=wx.StaticText(self.panel, label='Tipo Requerimiento')\n self.lbltipotransp=wx.StaticText(self.panel, label='Tipo de Transporte')\n self.lblcont=wx.StaticText(self.panel, label='Contenedor')\n self.lbltipocont=wx.StaticText(self.panel, label='Tipo')\n self.lbldescargue=wx.StaticText(self.panel, label='Requiere Descargue')\n self.lblorigen=wx.StaticText(self.panel, label='Origen')\n self.lbldestino=wx.StaticText(self.panel, label='Destino')\n self.lblkm=wx.StaticText(self.panel, label='Km')\n self.lblprecio=wx.StaticText(self.panel, label='Precio + Recargo')\n self.lblrecargotransporte=wx.StaticText(self.panel, label='Recargo Transp.')\n self.lblnombreresp=wx.StaticText(self.panel, label='Nombre\\nResponsable')\n self.lbltelresp=wx.StaticText(self.panel, label='Telefono Resp.')\n self.lbl_correoresp=wx.StaticText(self.panel, label='Correo')\n self.lblnombresiso=wx.StaticText(self.panel, label='Nombre SISO')\n self.lbltelesiso=wx.StaticText(self.panel, label='Telefono SISO')\n self.lbldebeinfo=wx.StaticText(self.panel, label='Debe Enviarse\\nInformacion')\n self.lblhorasantes=wx.StaticText(self.panel, label='N° de Horas Antes')\n self.lblinfologistica=wx.StaticText(self.panel, label='Info Logistica')\n self.lblinfocliente=wx.StaticText(self.panel, label='Info Cliente')\n self.lblfechaentrega=wx.StaticText(self.panel, label='Fecha de Entrega')\n self.lbldireccion=wx.StaticText(self.panel, label='Direccion Exacta')\n self.lblreferenciacont=wx.StaticText(self.panel, label='Ref.Contenedor')\n self.lblnombreconduc=wx.StaticText(self.panel, label='Nombre Conductor')\n 
self.lblcedula=wx.StaticText(self.panel, label='Cedula')\n self.lbltelefonoconduc=wx.StaticText(self.panel, label='Telefono')\n self.lblplaca=wx.StaticText(self.panel, label='Placa')\n self.lbladiciones=wx.StaticText(self.panel, label='Adiciones Entrega')\n \n if self.lista_valores_fila[col_horasantes-1] != None:\n horas=str(self.lista_valores_fila[col_horasantes-1])\n else:\n horas='0'\n \n self.lblpreguntahoras=wx.StaticText(self.panel, label='Documentacion Enviada '+ horas + ' Horas Antes?')\n self.lblpreguntadoc=wx.StaticText(self.panel, label='Documentacion Completa?')\n self.lblnombrecliente=wx.StaticText(self.panel, label='Nombre Cliente')\n\n self.txtcotizacion=wx.TextCtrl(self.panel,style=wx.TE_READONLY)\n self.txttiporeq=wx.TextCtrl(self.panel,style=wx.TE_READONLY)\n self.txttipotransp=wx.TextCtrl(self.panel,style=wx.TE_READONLY)\n self.txtcont=wx.TextCtrl(self.panel,style=wx.TE_READONLY)\n self.txttipocont=wx.TextCtrl(self.panel,style=wx.TE_READONLY)\n self.txtdescargue=wx.TextCtrl(self.panel,style=wx.TE_READONLY)\n self.txtorigen=wx.TextCtrl(self.panel,style=wx.TE_READONLY)\n self.txtdestino=wx.TextCtrl(self.panel,style=wx.TE_READONLY)\n self.txtkm=wx.TextCtrl(self.panel,style=wx.TE_READONLY)\n self.txtprecio=wx.TextCtrl(self.panel,style=wx.TE_READONLY)\n self.txtrecargotransporte=wx.TextCtrl(self.panel,style=wx.TE_READONLY)\n self.txtnombreresp=wx.TextCtrl(self.panel,style=wx.TE_READONLY)\n self.txttelresp=wx.TextCtrl(self.panel,style=wx.TE_READONLY)\n self.txtcorreoresp=wx.TextCtrl(self.panel,style=wx.TE_READONLY)\n self.txtnombresiso=wx.TextCtrl(self.panel,style=wx.TE_READONLY)\n self.txttelesiso=wx.TextCtrl(self.panel,style=wx.TE_READONLY)\n self.txtdebeinfo=wx.TextCtrl(self.panel,style=wx.TE_READONLY)\n self.txthorasantes=wx.TextCtrl(self.panel,style=wx.TE_READONLY)\n self.txtnombrecliente=wx.TextCtrl(self.panel,style=wx.TE_READONLY)\n\n try:\n self.txtcotizacion.SetValue(self.lista_valores_fila[col_cotizacion-1])\n except:\n self.txtcotizacion.SetValue('N/A')\n \n try:\n self.txttiporeq.SetValue(self.lista_valores_fila[col_tipo_req-1])\n except:\n self.txttiporeq.SetValue('N/A')\n try:\n self.txttipotransp.SetValue(self.lista_valores_fila[col_tipotransporte-1])\n except:\n self.txttipotransp.SetValue('N/A')\n try:\n self.txtcont.SetValue(self.lista_valores_fila[col_contenedor-1])\n except:\n self.txtcont.SetValue('N/A')\n try:\n self.txttipocont.SetValue(self.lista_valores_fila[col_tipocont-1])\n except:\n self.txttipocont.SetValue('N/A') \n \n try:\n self.txtdescargue.SetValue(self.lista_valores_fila[col_requieredescargue-1])\n except:\n self.txtdescargue.SetValue('N/A') \n try:\n self.txtorigen.SetValue(self.lista_valores_fila[col_origen-1])\n except:\n self.txtorigen.SetValue('N/A')\n try:\n self.txtdestino.SetValue(self.lista_valores_fila[col_destino-1])\n except:\n self.txtdestino.SetValue('N/A')\n try:\n self.txtkm.SetValue(self.lista_valores_fila[col_km-1])\n except:\n self.txtkm.SetValue('N/A')\n try:\n self.txtprecio.SetValue(str(self.lista_valores_fila[col_precio_recargo-1]))\n except:\n\n self.txtprecio.SetValue('N/A')\n try:\n self.txtrecargotransporte.SetValue(self.lista_valores_fila[col_recargotransporte-1])\n except:\n self.txtrecargotransporte.SetValue('N/A')\n try:\n self.txtnombreresp.SetValue(self.lista_valores_fila[col_nombreresponsable-1])\n except:\n self.txtnombreresp.SetValue('N/A')\n try:\n self.txttelresp.SetValue(self.lista_valores_fila[col_telefono_resp-1])\n except:\n self.txttelresp.SetValue('N/A')\n try:\n 
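# openpyxl returns None for an empty cell, which makes SetValue raise; each except below falls back to 'N/A'\n            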
self.txtcorreoresp.SetValue(self.lista_valores_fila[col_correo-1])\n except:\n self.txtcorreoresp.SetValue('N/A')\n try:\n self.txtnombresiso.SetValue(self.lista_valores_fila[col_nombresiso-1])\n except:\n self.txtnombresiso.SetValue('N/A')\n try:\n self.txttelesiso.SetValue(self.lista_valores_fila[col_telefono_siso-1])\n except:\n self.txttelesiso.SetValue('N/A')\n try:\n self.txtdebeinfo.SetValue(self.lista_valores_fila[col_debeinfo-1])\n except:\n self.txtdebeinfo.SetValue('N/A')\n try:\n self.txthorasantes.SetValue(str(self.lista_valores_fila[col_horasantes-1]))\n except:\n self.txthorasantes.SetValue('N/A')\n \n try:\n self.txtnombrecliente.SetValue(str(self.lista_valores_fila[col_nombrecliente-1]))\n except:\n self.txtnombrecliente.SetValue('N/A')\n\n\n \n \n self.txtfechaentrega=wx.adv.DatePickerCtrl(self.panel,style=wx.adv.DP_DROPDOWN | wx.adv.DP_ALLOWNONE)\n self.txtdireccion=wx.TextCtrl(self.panel)\n self.txtreferenciacont=wx.TextCtrl(self.panel)\n self.txtplaca=wx.TextCtrl(self.panel)\n self.txttelefonoconduc=wx.TextCtrl(self.panel)\n self.txtcedula=wx.TextCtrl(self.panel)\n self.txtnombreconduc=wx.TextCtrl(self.panel)\n self.txtadiciones=wx.TextCtrl(self.panel,style = wx.TE_MULTILINE)\n self.checkpreguntadoc_si=wx.CheckBox(self.panel, label= \"Si\")\n self.checkpreguntadoc_no=wx.CheckBox(self.panel, label= \"No\")\n self.checkpreguntahoras_si=wx.CheckBox(self.panel, label= \"Si\")\n self.checkpreguntahoras_no=wx.CheckBox(self.panel, label= \"No\")\n \n \n #try de fecha, si el formato en la hoja de excel no es fecha, pasa a la excepcion\n try:\n self.txtfechaentrega.SetValue(self.lista_valores_fila[col_fechaentrega-1])\n except:\n #try de fecha, si el formato en la hoja de excel no es ni fecha, ni string, pasa a la excepcion\n try:\n self.txtfechaentrega.SetValue(datetime.strptime(self.lista_valores_fila[col_fechaentrega-1], '%d/%m/%Y'))\n except Exception as e:\n self.txtfechaentrega.SetValue(wx.DateTime())\n \n try:\n self.txtdireccion.SetValue(self.lista_valores_fila[col_direccion-1])\n except:\n self.txtdireccion.SetValue('')\n \n try:\n self.txtreferenciacont.SetValue(self.lista_valores_fila[col_referenciacont-1])\n except:\n self.txtreferenciacont.SetValue('')\n \n try:\n self.txtplaca.SetValue(self.lista_valores_fila[col_placa-1])\n except:\n self.txtplaca.SetValue('')\n \n try:\n if self.lista_valores_fila[col_telefonoconduc-1]==None:\n self.txttelefonoconduc.SetValue('') \n else:\n self.txttelefonoconduc.SetValue(str(self.lista_valores_fila[col_telefonoconduc-1]))\n except:\n self.txttelefonoconduc.SetValue('')\n \n try:\n if self.lista_valores_fila[col_cedula-1]== None:\n self.txtcedula.SetValue('')\n else:\n self.txtcedula.SetValue('')\n self.txtcedula.SetValue(str(self.lista_valores_fila[col_cedula-1]))\n except:\n self.txtcedula.SetValue('')\n \n \n try:\n self.txtnombreconduc.SetValue(self.lista_valores_fila[col_nombreconduc-1])\n except:\n self.txtnombreconduc.SetValue('')\n \n try:\n self.txtadiciones.SetValue(self.lista_valores_fila[col_adiciones-1])\n except:\n self.txtadiciones.SetValue('CONTENEDOR: ' + self.txtcont.GetValue() + ' - TIPO: ' + self.txttipocont.GetValue())\n\n \n \n \n self.lblrequerimiento.SetFont(bold_font)\n self.lblconsecutivo_tiporeq.SetFont(bold_font)\n self.lblfecha.SetFont(bold_font)\n self.lblareaencargada.SetFont(bold_font)\n self.lblcotizacion.SetFont(bold_font)\n self.lbltipotransp.SetFont(bold_font)\n self.lblcont.SetFont(bold_font)\n self.lbltipocont.SetFont(bold_font)\n self.lbldescargue.SetFont(bold_font)\n 
self.lblorigen.SetFont(bold_font)\n self.lbldestino.SetFont(bold_font)\n self.lblkm.SetFont(bold_font)\n self.lblprecio.SetFont(bold_font)\n self.lblrecargotransporte.SetFont(bold_font)\n self.lblnombreresp.SetFont(bold_font)\n self.lbltelresp.SetFont(bold_font)\n self.lbl_correoresp.SetFont(bold_font)\n self.lblnombresiso.SetFont(bold_font)\n self.lbltelesiso.SetFont(bold_font)\n self.lbldebeinfo.SetFont(bold_font)\n self.lblhorasantes.SetFont(bold_font)\n self.lblinfologistica.SetFont(title_font3)\n self.lblinfocliente.SetFont(title_font3)\n self.lblfechaentrega.SetFont(bold_font)\n self.lbldireccion.SetFont(bold_font)\n self.lblreferenciacont.SetFont(bold_font)\n self.lblnombreconduc.SetFont(bold_font)\n self.lblcedula.SetFont(bold_font)\n self.lbltelefonoconduc.SetFont(bold_font)\n self.lblplaca.SetFont(bold_font)\n self.lbladiciones.SetFont(bold_font)\n self.lblpreguntahoras.SetFont(bold_font)\n self.lblpreguntadoc.SetFont(bold_font)\n self.lbltiporeq.SetFont(bold_font)\n self.lblnombrecliente.SetFont(bold_font)\n\n self.lbltitle.SetBackgroundColour(secondary_color)\n self.lblrequerimiento.SetBackgroundColour(secondary_color)\n self.lblconsecutivo_tiporeq.SetBackgroundColour(secondary_color)\n self.lblfecha.SetBackgroundColour(secondary_color)\n self.lblareaencargada.SetBackgroundColour(secondary_color)\n self.lblcotizacion.SetBackgroundColour(secondary_color)\n self.lbltipotransp.SetBackgroundColour(secondary_color)\n self.lblcont.SetBackgroundColour(secondary_color)\n self.lbltipocont.SetBackgroundColour(secondary_color)\n self.lbldescargue.SetBackgroundColour(secondary_color)\n self.lblorigen.SetBackgroundColour(secondary_color)\n self.lbldestino.SetBackgroundColour(secondary_color)\n self.lblkm.SetBackgroundColour(secondary_color)\n self.lblprecio.SetBackgroundColour(secondary_color)\n self.lblrecargotransporte.SetBackgroundColour(secondary_color)\n self.lblnombreresp.SetBackgroundColour(secondary_color)\n self.lbltelresp.SetBackgroundColour(secondary_color)\n self.lbl_correoresp.SetBackgroundColour(secondary_color)\n self.lblnombresiso.SetBackgroundColour(secondary_color)\n self.lbltelesiso.SetBackgroundColour(secondary_color)\n self.lbldebeinfo.SetBackgroundColour(secondary_color)\n self.lblhorasantes.SetBackgroundColour(secondary_color)\n self.lblinfologistica.SetBackgroundColour(secondary_color)\n self.lblinfocliente.SetBackgroundColour(secondary_color)\n self.lblfechaentrega.SetBackgroundColour(secondary_color)\n self.lbldireccion.SetBackgroundColour(secondary_color)\n self.lblreferenciacont.SetBackgroundColour(secondary_color)\n self.lblnombreconduc.SetBackgroundColour(secondary_color)\n self.lblcedula.SetBackgroundColour(secondary_color)\n self.lbltelefonoconduc.SetBackgroundColour(secondary_color)\n self.lblplaca.SetBackgroundColour(secondary_color)\n self.lbladiciones.SetBackgroundColour(secondary_color)\n self.lblpreguntahoras.SetBackgroundColour(secondary_color)\n self.lblpreguntadoc.SetBackgroundColour(secondary_color)\n self.lbltiporeq.SetBackgroundColour(secondary_color)\n\n self.checkpreguntahoras_si.SetBackgroundColour(secondary_color)\n self.checkpreguntahoras_no.SetBackgroundColour(secondary_color)\n self.checkpreguntadoc_si.SetBackgroundColour(secondary_color)\n self.checkpreguntadoc_no.SetBackgroundColour(secondary_color)\n\n self.lbltitle.SetForegroundColour(principal_color)\n self.lblrequerimiento.SetForegroundColour(principal_color)\n self.lblconsecutivo_tiporeq.SetForegroundColour(principal_color)\n self.lblfecha.SetForegroundColour(principal_color)\n 
self.lblareaencargada.SetForegroundColour(principal_color)\n self.lblcotizacion.SetForegroundColour(principal_color)\n self.lbltipotransp.SetForegroundColour(principal_color)\n self.lblcont.SetForegroundColour(principal_color)\n self.lbltipocont.SetForegroundColour(principal_color)\n self.lbldescargue.SetForegroundColour(principal_color)\n self.lblorigen.SetForegroundColour(principal_color)\n self.lbldestino.SetForegroundColour(principal_color)\n self.lblkm.SetForegroundColour(principal_color)\n self.lblprecio.SetForegroundColour(principal_color)\n self.lblrecargotransporte.SetForegroundColour(principal_color)\n self.lblnombreresp.SetForegroundColour(principal_color)\n self.lbltelresp.SetForegroundColour(principal_color)\n self.lbl_correoresp.SetForegroundColour(principal_color)\n self.lblnombresiso.SetForegroundColour(principal_color)\n self.lbltelesiso.SetForegroundColour(principal_color)\n self.lbldebeinfo.SetForegroundColour(principal_color)\n self.lblhorasantes.SetForegroundColour(principal_color)\n self.lblinfologistica.SetForegroundColour(principal_color)\n self.lblinfocliente.SetForegroundColour(principal_color)\n self.lblfechaentrega.SetForegroundColour(principal_color)\n self.lbldireccion.SetForegroundColour(principal_color)\n self.lblreferenciacont.SetForegroundColour(principal_color)\n self.lblnombreconduc.SetForegroundColour(principal_color)\n self.lblcedula.SetForegroundColour(principal_color)\n self.lbltelefonoconduc.SetForegroundColour(principal_color)\n self.lblplaca.SetForegroundColour(principal_color)\n self.lbladiciones.SetForegroundColour(principal_color)\n self.lblpreguntahoras.SetForegroundColour(principal_color)\n self.lblpreguntadoc.SetForegroundColour(principal_color)\n self.lbltiporeq.SetForegroundColour(principal_color)\n self.lblnombrecliente.SetForegroundColour(principal_color)\n\n self.checkpreguntahoras_si.SetForegroundColour(principal_color)\n self.checkpreguntahoras_no.SetForegroundColour(principal_color)\n self.checkpreguntadoc_si.SetForegroundColour(principal_color)\n self.checkpreguntadoc_no.SetForegroundColour(principal_color)\n \n \n btn_finalizar = wx.Button(self.panel, id=wx.ID_ANY, label=\"Finalizar\",size=(-1,-1))\n \n self.lbltitle.SetFont(title_font)\n self.lblrequerimiento.SetFont(title_font3)\n\n \n self.fgs.Add(self.lbltitle,pos=(1,1),span=(1,8), flag= wx.ALL | wx.ALIGN_CENTER, border=5)\n self.fgs.Add(self.lblrequerimiento,pos=(2,1),span=(1,2), flag= wx.ALL, border=0)\n self.fgs.Add(self.lblconsecutivo_tiporeq,pos=(3,1),span=(1,2), flag= wx.ALL, border=0)\n self.fgs.Add(self.lblfecha,pos=(4,1),span=(1,1), flag= wx.ALL, border=0)\n self.fgs.Add(self.lblareaencargada,pos=(5,1),span=(1,2), flag= wx.ALL, border=0)\n self.fgs.Add(self.lblcotizacion,pos=(7,1),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lbltiporeq,pos=(8,1),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lbltipotransp,pos=(9,1),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lblcont,pos=(10,1),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lbltipocont,pos=(11,1),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lbldescargue,pos=(12,1),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lblorigen,pos=(7,3),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lbldestino,pos=(8,3),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lblkm,pos=(9,3),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lblprecio,pos=(10,3),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lblrecargotransporte,pos=(11,3),span=(1,1), flag= 
wx.ALL, border=5)\n self.fgs.Add(self.lblnombreresp,pos=(7,5),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lbltelresp,pos=(8,5),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lbl_correoresp,pos=(9,5),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lblnombresiso,pos=(10,5),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lbltelesiso,pos=(9,7),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lbldebeinfo,pos=(7,7),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lblhorasantes,pos=(8,7),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lblinfologistica,pos=(6,1),span=(1,4), flag= wx.ALL | wx.ALIGN_CENTER, border=5)\n self.fgs.Add(self.lblinfocliente,pos=(6,5),span=(1,4), flag= wx.ALL| wx.ALIGN_CENTER, border=5)\n self.fgs.Add(self.lblfechaentrega,pos=(13,1),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lbldireccion,pos=(13,3),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lblreferenciacont,pos=(13,7),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lblnombreconduc,pos=(14,4),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lblcedula,pos=(14,3),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lbltelefonoconduc,pos=(14,2),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lblplaca,pos=(14,1),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.lbladiciones,pos=(14,6),span=(1,3), flag= wx.ALL|wx.ALIGN_CENTER, border=5)\n self.fgs.Add(self.lblpreguntahoras,pos=(20,1),span=(2,2), flag= wx.ALL |wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL , border=5)\n self.fgs.Add(self.lblpreguntadoc,pos=(17,1),span=(2,2), flag= wx.ALL |wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL, border=5)\n self.fgs.Add(self.txtcotizacion,pos=(7,2),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txttipotransp,pos=(9,2),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txtcont,pos=(10,2),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txttipocont,pos=(11,2),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txtdescargue,pos=(12,2),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txttiporeq,pos=(8,2),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txtorigen,pos=(7,4),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txtdestino,pos=(8,4),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txtkm,pos=(9,4),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txtprecio,pos=(10,4),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txtrecargotransporte,pos=(11,4),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txtnombreresp,pos=(7,6),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txttelresp,pos=(8,6),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txtcorreoresp,pos=(9,6),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txtnombresiso,pos=(10,6),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txttelesiso,pos=(9,8),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txtdebeinfo,pos=(7,8),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txthorasantes,pos=(8,8),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txtfechaentrega,pos=(13,2),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txtdireccion,pos=(13,4),span=(1,3), flag= wx.ALL | wx.EXPAND, border=5)\n self.fgs.Add(self.txtreferenciacont,pos=(13,8),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txtnombreconduc,pos=(15,4),span=(1,2), flag= wx.ALL | wx.EXPAND, border=5)\n self.fgs.Add(self.txtcedula,pos=(15,3),span=(1,1), flag= wx.ALL, border=5)\n 
self.fgs.Add(self.txttelefonoconduc,pos=(15,2),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txtplaca,pos=(15,1),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.txtadiciones,pos=(15,6),span=(4,3), flag= wx.ALL| wx.EXPAND, border=5)\n self.fgs.Add(self.checkpreguntahoras_si,pos=(20,3),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.checkpreguntahoras_no,pos=(21,3),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.checkpreguntadoc_si,pos=(17,3),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(self.checkpreguntadoc_no,pos=(18,3),span=(1,1), flag= wx.ALL, border=5)\n self.fgs.Add(btn_finalizar,pos=(21,8),span=(1,1), flag= wx.ALL, border=5)\n\n self.fgs.Add(self.txtnombrecliente,pos=(11,6),span=(1,2), flag= wx.ALL| wx.EXPAND, border=5)\n self.fgs.Add(self.lblnombrecliente,pos=(11,5),span=(1,1), flag= wx.ALL, border=5)\n\n\n self.checkpreguntahoras_si.Bind(wx.EVT_CHECKBOX, self.onCheckhoras_si)\n self.checkpreguntahoras_no.Bind(wx.EVT_CHECKBOX, self.onCheckhoras_no)\n \n self.checkpreguntadoc_si.Bind(wx.EVT_CHECKBOX, self.onCheckdoc_si)\n self.checkpreguntadoc_no.Bind(wx.EVT_CHECKBOX, self.onCheckdoc_no)\n \n btn_finalizar.Bind(wx.EVT_BUTTON, self.finalizar)\n \n \n mainSizer= wx.BoxSizer(wx.VERTICAL)\n mainSizer.Add(self.fgs,0, flag=wx.ALIGN_LEFT)\n self.panel.SetSizerAndFit(mainSizer)\n \n if self.lista_valores_fila[col_debeinfo -1]==self.lista_descargue[3]:\n # self.lbldebeinfo.Hide()\n # self.lblhorasantes.Hide()\n # self.txtdebeinfo.Hide()\n self.txthorasantes.SetValue('N/A')\n self.lblpreguntahoras.Hide()\n self.checkpreguntahoras_si.Hide()\n self.checkpreguntahoras_no.Hide()\n\n\n \n def onCheckhoras_si(self,event):\n if self.checkpreguntahoras_no.IsChecked():\n self.checkpreguntahoras_no.SetValue(False)\n \n def onCheckhoras_no(self,event):\n if self.checkpreguntahoras_si.IsChecked():\n self.checkpreguntahoras_si.SetValue(False)\n \n def onCheckdoc_si(self,event):\n if self.checkpreguntadoc_no.IsChecked():\n self.checkpreguntadoc_no.SetValue(False)\n \n def onCheckdoc_no(self,event):\n if self.checkpreguntadoc_si.IsChecked():\n self.checkpreguntadoc_si.SetValue(False)\n \n \n \n def finalizar(self,event):\n wb_req=openpyxl.load_workbook(path_db)\n self.hist_req_sheet=wb_req['Requerimientos']\n \n \n diccionario_campos_oblig_texto={self.txtdireccion:'Direccion Exacta', \n self.txtreferenciacont:'Referencia Contenedor', self.txtreferenciacont:'Ref. 
Contenedor', \n self.txtplaca:'Placa', self.txttelefonoconduc:'Telefono', self.txtcedula:'Cedula',\n self.txtnombreconduc:'Nombre Conductor'}\n \n checkbox1=[self.checkpreguntadoc_no,self.checkpreguntadoc_si]\n \n if self.validar_campos_vacios_texto(diccionario_campos_oblig_texto)==False:\n return\n if self.validar_checkbox(checkbox1,'Documentacion Completa?')==False:\n return\n \n if self.lista_valores_fila[col_debeinfo -1]==self.lista_descargue[3]:\n pass\n else:\n checkbox2=[self.checkpreguntahoras_no,self.checkpreguntahoras_si]\n if self.validar_checkbox(checkbox2,'Documentacion Enviada Horas Antes?')==False:\n return\n\n fechaentrega=self.txtfechaentrega.GetValue()\n fechaentrega=datetime.strptime(fechaentrega.Format('%d/%m/%Y'),'%d/%m/%Y')\n direccion=self.txtdireccion.GetValue()\n referenciacont=self.txtreferenciacont.GetValue()\n nombreconduc=self.txtnombreconduc.GetValue()\n cedula=self.txtcedula.GetValue()\n telefonoconduc=self.txttelefonoconduc.GetValue()\n placa=self.txtplaca.GetValue()\n adiciones=self.txtadiciones.GetValue()\n \n if self.checkpreguntahoras_si.IsChecked():\n check_horas=\"Si\"\n else:\n check_horas=\"No\"\n \n if self.checkpreguntadoc_si.IsChecked():\n check_doc=\"Si\"\n else:\n check_doc=\"No\"\n \n \n self.hist_req_sheet.cell(row=self.nro_fila_req, column=col_fechaentrega).value=fechaentrega\n self.hist_req_sheet.cell(row=self.nro_fila_req, column=col_direccion).value=direccion\n self.hist_req_sheet.cell(row=self.nro_fila_req, column=col_referenciacont).value=referenciacont\n self.hist_req_sheet.cell(row=self.nro_fila_req, column=col_nombreconduc).value=nombreconduc\n self.hist_req_sheet.cell(row=self.nro_fila_req, column=col_cedula).value=cedula\n self.hist_req_sheet.cell(row=self.nro_fila_req, column=col_telefonoconduc).value=telefonoconduc\n self.hist_req_sheet.cell(row=self.nro_fila_req, column=col_placa).value=placa\n self.hist_req_sheet.cell(row=self.nro_fila_req, column=col_adiciones).value=adiciones\n self.hist_req_sheet.cell(row=self.nro_fila_req, column=col_preguntahoras).value=check_horas\n self.hist_req_sheet.cell(row=self.nro_fila_req, column=col_preguntadoc).value=check_doc\n \n try:\n wb_req.save(path_db)\n sgto_msgbox=wx.MessageDialog(None,'Recuerde Hacer el Seguimiento','Atencion',wx.ICON_WARNING)\n sgto_msgbox.ShowModal()\n self.Destroy()\n except:\n error_msgbox=wx.MessageDialog(None,'Error al guardar el registro en la BD. 
\\nVerifique el el archivo de excel este cerrado y en la ruta correcta.','ERROR',wx.ICON_ERROR)\n error_msgbox.ShowModal()\n\n \n def validar_campos_vacios_texto(self,diccionario_campos_oblig):\n for campo in diccionario_campos_oblig:\n if len(campo.GetValue().strip()) == 0:\n error_msgbox=wx.MessageDialog(None,'Falta diligenciar el campo: ' + diccionario_campos_oblig[campo],'ERROR',wx.ICON_ERROR)\n error_msgbox.ShowModal() \n return False\n return True\n def validar_checkbox(self,checkbox,label):\n for i in range (len(checkbox)):\n if checkbox[i].IsChecked():\n return True\n error_msgbox=wx.MessageDialog(None,'Seleccione una opcion en el campo: ' + label,'ERROR',wx.ICON_ERROR)\n error_msgbox.ShowModal()\n return False\n\nclass LogisticaPanel(wx.Panel):\n\n def __init__(self,parent):\n # create the panel\n wx.Panel.__init__(self, parent=parent)\n try:\n\n image_file = 'logo35.png'\n bmp1 = wx.Image(\n image_file, \n wx.BITMAP_TYPE_ANY).ConvertToBitmap()\n # image's upper left corner anchors at panel \n # coordinates (0, 0)\n self.bitmap3 = wx.StaticBitmap(\n self, -1, bmp1, (5, 0))\n # show some image details\n #str1 = \"%s %dx%d\" % (image_file, bmp1.GetWidth(),\n #bmp1.GetHeight()) \n #parent.SetTitle(str1)\n except IOError:\n print (\"Image file %s not found\")\n raise SystemExit \n\n##################################################### ^ LOGISTICA ^ #############################################################################################################################\n\n##################################################### REMISION #############################################################################################################################\nclass ww_remision11(wx.Frame):\n \n def __init__(self,parent):\n \n self.wb_req=openpyxl.load_workbook(path_db)\n self.hist_req_sheet=self.wb_req['Requerimientos']\n \n wx.Frame.__init__(self, None, wx.ID_ANY, \"Centro Logistico\", size=(270, 250),style=wx.DEFAULT_FRAME_STYLE & ~(wx.RESIZE_BORDER | wx.MAXIMIZE_BOX)) \n self.SetBackgroundColour(secondary_color)\n self.Center()\n try:\n self.panel=wx.Panel(self)\n panel_font= wx.Font(10, wx.DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,underline=False,faceName=\"Folks-Normal\")\n self.panel.SetBackgroundColour(secondary_color)\n self.panel.SetFont(panel_font)\n\n except IOError:\n print (\"Image file %s not found\" )\n raise SystemExit\n \n ico = wx.Icon('Cont.ico', wx.BITMAP_TYPE_ICO)\n self.SetIcon(ico)\n self.fgs= wx.GridBagSizer(0,0)\n \n title_font= wx.Font(11, wx.FONTFAMILY_DECORATIVE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,underline=False,faceName=\"Folks-Normal\")\n \n self.lbltitle =wx.StaticText(self.panel, label='Ingrese Numero de Requerimiento:')\n self.lbltitle.SetFont(title_font)\n self.lbltitle.SetBackgroundColour(secondary_color)\n self.lbltitle.SetForegroundColour(principal_color)\n self.fgs.Add(self.lbltitle,pos=(2,1),span=(1,3), flag=wx.ALL | wx.ALIGN_CENTER, border=5)\n\n self.txtreq = wx.TextCtrl(self.panel)\n self.fgs.Add(self.txtreq , pos=(4,1),span=(1,3), flag= wx.ALL| wx.ALIGN_CENTER, border=5)\n \n btn_aceptar = wx.Button(self.panel, id=wx.ID_OK, label=\"Aceptar\",size=(-1,-1))\n self.fgs.Add(btn_aceptar, pos=(6,1),span=(1,3), flag= wx.ALL | wx.ALIGN_CENTER, border=0)\n btn_aceptar.Bind(wx.EVT_BUTTON, self.open_remision22)\n\n mainSizer= wx.BoxSizer(wx.VERTICAL)\n mainSizer.Add(self.fgs,0, flag=wx.ALIGN_LEFT)\n self.panel.SetSizerAndFit(mainSizer)\n \n #-------------Button Functions-----------------# \n def open_remision22(self, event):\n \n 
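# check the typed requirement number against column A and, if it exists, generate the remision\n        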
self.lista_nro_req=[]\n \n for cell in self.hist_req_sheet['A']:\n if cell.value !=None:\n self.lista_nro_req.append(cell.value)\n global req_selec\n \n try:\n req_selec=int(self.txtreq.GetValue())\n except:\n error_msgbox=wx.MessageDialog(None,'Numero de Requerimiento No Encontrado','ERROR',wx.ICON_ERROR)\n error_msgbox.ShowModal()\n return\n \n if req_selec in self.lista_nro_req:\n index_req=self.lista_nro_req.index(req_selec)\n self.crear_remision(index_req)\n self.Destroy()\n else:\n error_msgbox=wx.MessageDialog(None,'Numero de Requerimiento No Encontrado','ERROR',wx.ICON_ERROR)\n error_msgbox.ShowModal()\n \n def crear_remision(self,index_req):\n \n wb_remision=openpyxl.load_workbook(path_remision)\n sheet_remision=wb_remision['Remision']\n \n img = openpyxl.drawing.image.Image('logo_remision.png')\n img.anchor = 'Y2'\n sheet_remision.add_image(img)\n\n rows=[]\n for row in self.hist_req_sheet.iter_rows(min_row=(index_req+1), max_row=(index_req+1)):\n lbls=[]\n for cell in row:\n lbls.append(cell.value)\n rows.append(lbls)\n \n \n nro_req=str(rows[0][col_requerimiento_auto-1])\n consec_remision=str(rows[0][col_consecutivo_tipo_req-1])\n tipo=rows[0][col_tipo_req-1]\n fecha=datetime.today().strftime('%d-%m-%Y')\n remite='CONTENEDORES DE ANTIOQUIA S.A.S'\n cliente=rows[0][col_nombrecliente-1]\n destino=rows[0][col_destino-1]\n origen=rows[0][col_origen-1]\n direccion=rows[0][col_direccion-1]\n responsable=rows[0][col_nombreresponsable-1]\n celular=rows[0][col_telefono_resp-1]\n placa=rows[0][col_placa-1]\n conductor=rows[0][col_nombreconduc-1]\n cedula=rows[0][col_cedula-1]\n telefono=rows[0][col_telefonoconduc-1]\n adiciones=rows[0][col_adiciones-1]\n nro_interno=rows[0][col_referenciacont-1]\n \n sheet_remision['H11']=nro_req\n sheet_remision['H9']=consec_remision\n sheet_remision['AC9']=tipo\n sheet_remision['H12']=fecha\n sheet_remision['H13']=remite\n sheet_remision['H14']=cliente\n sheet_remision['H15']=destino\n sheet_remision['H16']=origen\n sheet_remision['H17']=direccion\n sheet_remision['H18']=responsable\n sheet_remision['H19']=celular\n sheet_remision['H21']=placa\n sheet_remision['H22']=conductor\n sheet_remision['H23']=cedula\n sheet_remision['H24']=telefono\n sheet_remision['H27']=adiciones\n sheet_remision['H35']=nro_interno\n \n if len(nro_req)==3:\n str_nro_req=nro_req\n elif len(nro_req)==2:\n str_nro_req='0'+str(nro_req)\n elif len(nro_req)==1:\n str_nro_req='00'+str(nro_req)\n \n año=datetime.today().strftime('%Y')\n fecha_remision=año[2:4]\n \n sheet_remision.cell(row=9, column=11)._style=deepcopy(sheet_remision['B9']._style)\n \n if tipo=='Venta' or tipo=='Alquiler' or tipo=='Modificacion':\n new_path=path_remision_A + 'REMISION No ' + str_nro_req + '-' + str(fecha_remision) +' '+ cliente.upper() + ' .xlsx'\n \n elif tipo=='Movimiento' or tipo=='Devolucion':\n new_path=path_remision_B + 'REMISION No ' + str_nro_req + '-' + str(fecha_remision) +' '+ cliente.upper() + ' .xlsx'\n \n try:\n wb_remision.save(new_path)\n sucsess_msgbox=wx.MessageDialog(None,'Remision Generada Exitosamente','Imprimir Remision',wx.ICON_INFORMATION)\n sucsess_msgbox.ShowModal()\n except:\n error_msgbox=wx.MessageDialog(None,'La remision no pudo ser generada','ERROR',wx.ICON_ERROR)\n error_msgbox.ShowModal() \n \n##################################################### ^ LOGISTICA ^ #############################################################################################################################\n\nclass ww_configuracion(wx.Frame): \n \n def __init__(self,parent):\n \n\n \n 
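# password prompt guarding the configuration screens\n        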
wx.Frame.__init__(self, None, wx.ID_ANY, \"Contenedores de Antioquia - Centro Logistico\", size=(250, 250)) \n \n try:\n \n #image_file = 'CINCO CONSULTORES.jpg'\n #bmp1 = wx.Image(\n #image_file, \n #wx.BITMAP_TYPE_ANY).ConvertToBitmap()\n \n #self.panel = wx.StaticBitmap(\n #self, -1, bmp1, (0, 0)\n self.panel=wx.Panel(self)\n self.panel.SetBackgroundColour(secondary_color)\n\n except IOError:\n print (\"Image file %s not found\" )\n raise SystemExit\n \n ico = wx.Icon('Cont.ico', wx.BITMAP_TYPE_ICO)\n self.SetIcon(ico)\n self.fgs= wx.GridBagSizer(0,0)\n \n title_font= wx.Font(10, wx.FONTFAMILY_DECORATIVE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD)\n \n self.lbltitle =wx.StaticText(self.panel, label='Ingrese Contraseña')\n self.lbltitle.SetFont(title_font)\n self.lbltitle.SetBackgroundColour(secondary_color)\n self.lbltitle.SetForegroundColour(principal_color)\n self.fgs.Add(self.lbltitle,pos=(2,1),span=(1,3), flag=wx.ALL | wx.ALIGN_CENTER, border=5)\n\n self.txtpass = wx.TextCtrl(self.panel, style= wx.TE_PASSWORD)\n self.fgs.Add(self.txtpass , pos=(3,1),span=(1,3), flag= wx.ALL | wx.EXPAND, border=5)\n \n btn_aceptar = wx.Button(self.panel, id=wx.ID_ANY, label=\"Aceptar\",size=(-1,-1))\n self.fgs.Add(btn_aceptar, pos=(6,1),span=(1,3), flag= wx.ALL | wx.ALIGN_CENTER, border=0)\n btn_aceptar.Bind(wx.EVT_BUTTON, self.onBtn_aceptar)\n\n mainSizer= wx.BoxSizer(wx.VERTICAL)\n mainSizer.Add(self.fgs,0, flag=wx.ALIGN_CENTER)\n self.panel.SetSizerAndFit(mainSizer)\n \n #-------------Button Functions-----------------# \n def onBtn_aceptar(self, event):\n self.config_sheet=wb_listas['Config']\n \n self.Destroy()\n #ww_nuevo_requerimiento12(parent=self.panel).Show()\n\n #-------------Button Functions-----------------# \n \n\n \n\nclass MyApp(wx.App):\n def OnInit(self):\n self.frame= MyFrame()\n self.frame.Show()\n return True \n\n# Run the program \napp=MyApp()\napp.MainLoop()\ndel app\n \n ","sub_path":"ContAntioquia-Logistica.py","file_name":"ContAntioquia-Logistica.py","file_ext":"py","file_size_in_byte":97630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"123781038","text":"\"\"\"\n1246. Palindrome Removal\n\nGiven an integer array arr, in one move you can select a palindromic subarray arr[i], arr[i+1], ..., arr[j]\nwhere i <= j, and remove that subarray from the given array. Note that after removing a subarray,\nthe elements on the left and on the right of that subarray move to fill the gap left by the removal.\n\nReturn the minimum number of moves needed to remove all numbers from the array.\n\n\n\nExample 1:\n\nInput: arr = [1,2]\nOutput: 2\nExample 2:\n\nInput: arr = [1,3,4,1,5]\nOutput: 3\nExplanation: Remove [4] then remove [1,3,1] then remove [5].\n\n\nConstraints:\n\n1 <= arr.length <= 100\n1 <= arr[i] <= 20\n\n\n\"\"\"\nimport functools\n\n\nclass MinimumMoves:\n\n def doit_dp(self, arr):\n @functools.lru_cache(maxsize=None)\n def dp(i: int, j: int) -> int:\n if i >= j:\n return int(i == j)\n currMin = 1 + dp(i + 1, j)\n for k in range(i + 1, j + 1):\n if arr[i] == arr[k]:\n currMin = min(currMin, max(1, dp(i + 1, k - 1)) + dp(k + 1, j))\n return currMin\n return dp(0, len(arr) - 1)\n\n def doit_dp_1(self, arr):\n \"\"\"\n Idea is simple, we use DP array to memorize the status. DP[i][j] represents the minimum steps to remove this sub-array.\n Thus DP[0][len(arr)-1] should be the final result. For a sub-array [i, j]. 
We consider such scenarios:\n\n if arr[i] == arr[j], then we know after we transform [i+1][j-1] to a palindrome, the whole sub-array [i, j] will also be a palindrome.\n But we still need to testify each part of this sub-array from left to right. For example, if the sub-array is [1,4,1,1,2,3,2,1],\n we should split this array to [1,4,1] and [1,2,3,2,1]. So we traverse the whole array and try to split the array [i, j] to [i, k], [k+1, j],\n whereas i <= k < j.\n DP[i][j] should be the smallest value from previous two conditions.\n \"\"\"\n dp = [[0] * len(arr) for _ in range(len(arr))]\n for j in range(len(arr)):\n for i in range(j, -1, -1):\n r = len(arr)\n if arr[i] == arr[j]:\n r = 1 if i + 1 > j - 1 or dp[i+1][j-1] == 0 else dp[i+1][j-1]\n for k in range(i, j):\n r = min(r, dp[i][k] + dp[k+1][j])\n dp[i][j] = r\n return dp[0][len(arr)-1]\n\n def doit_dp(self, arr):\n\n N = len(arr)\n dp = [[0 for _ in range(N+1)] for _ in range(N+1)]\n\n for L in range(N+1):\n for i in range(1, N - L):\n j = i + L - 1\n dp[i][j] = float('inf')\n for k in range(i, j+1):\n # xxxxxkxxxxj if k == j, dp[k][j] == dp[k+1][j-1]\n if arr[k] == arr[j]:\n dp[i][j] = min(dp[i][j], dp[i][k-1] + max(1, dp[k+1][j-1]))\n\n return dp[1][N]\n\n\nif __name__ == '__main__':\n\n MinimumMoves().doit_dp([1, 3, 4, 1, 5])\n\n MinimumMoves().doit_dp([1,2])\n\n","sub_path":"PythonLeetcode/Leetcode/1246_PalindromeRemoval.py","file_name":"1246_PalindromeRemoval.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"418608215","text":"import neighbors\nimport matplotlib.pyplot as plt\nfrom statistics import median\n\n'''\nThis script plots data from the eurostat panel.\n'''\n\npanel = neighbors.main()\n\nborders = [list(panel[el][panel['neighbor_joined']==True][panel[el].notnull()])\n for el in panel.columns.tolist()[8:-4]]\n\ninterior = [list(panel[el][panel['neighbor_joined']==False][panel[el].notnull()])\n for el in panel.columns.tolist()[8:-4]]\n\n# getting averages, years for every 2 years\n\ny1 = [sum(el)/len(el) for el in borders]\n\ny1s = [y1[i] for i in range(0,len(y1),2)]\n\ny2 = [sum(el)/len(el) for el in interior]\ny2s = [y2[i] for i in range(0,len(y2),2)]\n\nyears = panel.columns.tolist()[8:-4]\nyears1 = [i for i in range(1993,2014,2)]\n'''\n# plot\nplt.plot(years1,y1s, label = 'Border Regions')\nplt.plot(years1,y2s, label = 'Interior Regions')\nplt.xticks(years1)\nplt.plot((2004, 2004), (0,500), 'k-', label = '2004')\nplt.legend()\nplt.ylabel('Per Capita R&D Expenditure')\nplt.xlabel('Year')\n\n'''\n#plt.plot(years1[4:],y1s[4:], label = 'Border Regions')\nplt.plot(years1[4:],y2s[4:], label = 'Interior Regions')\nplt.xticks(years1)\nplt.plot((2004, 2004), (0,500), 'k-', label = '2004')\nplt.legend()\nplt.ylabel('Per Capita R&D Expenditure')\nplt.xlabel('Year')\n","sub_path":"Problem Sets/Problem Set 7/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"141548459","text":"from ConfigSpace.configuration_space import ConfigurationSpace\nfrom ConfigSpace.conditions import EqualsCondition, InCondition\nfrom ConfigSpace.hyperparameters import UniformFloatHyperparameter, \\\n UniformIntegerHyperparameter, CategoricalHyperparameter, \\\n UnParametrizedHyperparameter, Constant\nimport numpy as np\nfrom lightgbm import LGBMClassifier\n\nfrom mindware.components.utils.constants import *\nfrom 
autosklearn.pipeline.components.base import AutoSklearnClassificationAlgorithm\n\n\nclass LightGBM(AutoSklearnClassificationAlgorithm):\n def __init__(self, n_estimators, learning_rate, num_leaves, max_depth, min_child_samples,\n subsample, colsample_bytree, random_state=None):\n self.n_estimators = int(n_estimators)\n self.learning_rate = learning_rate\n self.num_leaves = num_leaves\n self.max_depth = max_depth\n self.subsample = subsample\n self.min_child_samples = min_child_samples\n self.colsample_bytree = colsample_bytree\n\n self.n_jobs = 1\n self.random_state = random_state\n self.estimator = None\n\n def fit(self, X, y):\n self.estimator = LGBMClassifier(num_leaves=self.num_leaves,\n max_depth=self.max_depth,\n learning_rate=self.learning_rate,\n n_estimators=self.n_estimators,\n min_child_samples=self.min_child_samples,\n subsample=self.subsample,\n colsample_bytree=self.colsample_bytree,\n n_jobs=self.n_jobs)\n self.estimator.fit(X, y)\n return self\n\n def predict(self, X):\n if self.estimator is None:\n raise NotImplementedError()\n return self.estimator.predict(X)\n\n def predict_proba(self, X):\n if self.estimator is None:\n raise NotImplementedError()\n return self.estimator.predict_proba(X)\n\n @staticmethod\n def get_properties(dataset_properties=None):\n return {'shortname': 'LightGBM Classifier',\n 'name': 'LightGBM Classifier',\n 'handles_regression': False,\n 'handles_classification': True,\n 'handles_multiclass': True,\n 'handles_multilabel': True,\n 'is_deterministic': False,\n 'input': (SPARSE, DENSE, UNSIGNED_DATA),\n 'output': (PREDICTIONS,)}\n\n @staticmethod\n def get_hyperparameter_search_space(dataset_properties=None, optimizer='smac'):\n cs = ConfigurationSpace()\n n_estimators = UniformFloatHyperparameter(\"n_estimators\", 100, 1000, default_value=500, q=50)\n num_leaves = UniformIntegerHyperparameter(\"num_leaves\", 31, 2047, default_value=128)\n max_depth = Constant('max_depth', 15)\n learning_rate = UniformFloatHyperparameter(\"learning_rate\", 1e-3, 0.3, default_value=0.1, log=True)\n min_child_samples = UniformIntegerHyperparameter(\"min_child_samples\", 5, 30, default_value=20)\n subsample = UniformFloatHyperparameter(\"subsample\", 0.7, 1, default_value=1, q=0.1)\n colsample_bytree = UniformFloatHyperparameter(\"colsample_bytree\", 0.7, 1, default_value=1, q=0.1)\n cs.add_hyperparameters([n_estimators, num_leaves, max_depth, learning_rate, min_child_samples, subsample,\n colsample_bytree])\n return cs\n","sub_path":"scripts/ausk_udf_models/lightgbm.py","file_name":"lightgbm.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"504297447","text":"import sys\nimport wx\nfrom wx import glcanvas\nfrom OpenGL.GL import *\n\nclass MyCanvas(glcanvas.GLCanvas):\n def __init__(self, parent):\n super(MyCanvas, self).__init__(parent, -1, attribList=[])\n self.context = glcanvas.GLContext(self)\n self.initialized = False\n self.Bind(wx.EVT_SIZE, self.OnSize)\n self.Bind(wx.EVT_PAINT, self.OnPaint)\n\n def InitGL(self):\n glClearColor(1.0, 1.0, 1.0, 1.0)\n\n def OnSize(self, event):\n w, h = self.GetClientSize()\n glViewport(0, 0, w, h)\n \n def OnPaint(self, event):\n self.SetCurrent(self.context)\n if not self.initialized:\n self.InitGL()\n self.initialized = True\n self.OnSize(event)\n glClear(GL_COLOR_BUFFER_BIT)\n glColor3d(1.0, 0.0, 0.0)\n glBegin(GL_POLYGON)\n glVertex2d(-0.9, -0.9)\n glVertex2d(0.9, -0.9)\n glVertex2d(0.9, 0.9)\n glVertex2d(-0.9, 0.9)\n 
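# close the immediate-mode polygon and flush so the red quad appears right away\n        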
glEnd()\n glFlush()\n\nif __name__ == '__main__':\n app = wx.App()\n frame = wx.Frame(None, -1, sys.argv[0], size=(300,300))\n canvas = MyCanvas(frame)\n frame.Show()\n app.MainLoop()\n app.Destroy()\n","sub_path":"python/example-5-4-1.py","file_name":"example-5-4-1.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"491192609","text":"from lxml import etree\n\nclass KCEncoding:\n def __init__(self):\n self.k = 800\n self.maxPooling = 1\n self.samples = None\n\n def toXML(self):\n root = etree.Element(\"encoding\")\n root.set(\"type\", \"kc\")\n\n k = etree.SubElement(root, \"k\")\n k.text = str(self.k)\n maxPooling = etree.SubElement(root, \"maxPooling\")\n maxPooling.text = str(self.maxPooling)\n\n samples = etree.SubElement(root, \"samples\")\n sampleSet = etree.SubElement(samples, \"set\")\n for sample in self.samples:\n sampleSet.append(sample.toXML())\n\n return root\n\n def init(self, run):\n self.samples = run.getClusteringSamples()\n","sub_path":"scripts/tet/encodings/kcencoding.py","file_name":"kcencoding.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"37576668","text":"#import packages\nimport heartpy as hp\nimport matplotlib.pyplot as plt\n\nsample_rate = 250\n\ndata = hp.get_data('sensorReading_new_16.txt')\n\nplt.figure(figsize=(12,4))\nplt.plot(data)\nplt.show()\n\n\n","sub_path":"ecg_reading/test_heartratepy.py","file_name":"test_heartratepy.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"287757279","text":"#doing all the relevant imports\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\n\n# Read in the image and convert to grayscale\nimage = mpimg.imread('/home/prabhat/Downloads/pycharm-community-2017.3.1/bin/exit-ramp.jpg')\nplt.subplot(221)\nplt.imshow(image)\n\n\ngray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n#plt.subplot(222)\n#plt.imshow(gray)\n\n# Define a kernel size for Gaussian smoothing / blurring\n# Note: this step is optional as cv2.Canny() applies a 5x5 Gaussian internally\nkernel_size = 3\nblur_gray = cv2.GaussianBlur(gray,(kernel_size, kernel_size), 0)\n#plt.subplot(223)\n#plt.imshow(blur_gray)\n\n# Define parameters for Canny and run it\n# NOTE: if you try running this code you might want to change these!\nlow_threshold = 50\nhigh_threshold = 150\nedges = cv2.Canny(blur_gray, low_threshold, high_threshold)\nplt.subplot(222)\nplt.imshow(edges, cmap='Greys_r')\n\nlow_threshold = 50\nhigh_threshold = 250\nedges = cv2.Canny(blur_gray, low_threshold, high_threshold)\nplt.subplot(223)\nplt.imshow(edges, cmap='Greys_r')\n\nlow_threshold = 10\nhigh_threshold = 350\nedges = cv2.Canny(blur_gray, low_threshold, high_threshold)\nplt.subplot(224)\nplt.imshow(edges, cmap='Greys_r')\n\nplt.show()","sub_path":"Canny.py","file_name":"Canny.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"210836774","text":"#Temp Folder Control by Fábio Pinto\nimport os\nfrom shutil import rmtree\nfrom win10toast import ToastNotifier\ntoaster = ToastNotifier()\npath = \"D:\\#TEMP\"\ntry:\n rmtree(path)\n os.makedirs(path)\nexcept:\n print('Erro')\ntoaster.show_toast('Temp Folder Control', '#TEMP Folder 
Cleared')","sub_path":"tempFolderControl.py","file_name":"tempFolderControl.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"33244831","text":"__author__ = 'Indah'\n\nimport random\nfrom random import shuffle\n\n# Crossover methods\n\n# n point crossover can be used for binary representation and integer representation\ndef nPointCrossover(n, cr, indSize, parent1, parent2):\n # cp=random.randrange(1,indSize)\n cpr=random.random()\n # random.random() choose random floating point number in the range [0, 1)\n cr=1\n if cprd', bytes_in)[0]\n##\n##\n##\n##def _decode_asc(bytes_in):\n## \"\"\"Decode ascii.\"\"\"\n## if PY2:\n## # transform bytes_in to a list of ints\n## bytes_ord = map(ord, bytes_in)\n## else:\n## # in PY3 this is already the case\n## bytes_ord = bytes_in\n## printable = map(ord, string.printable)\n## s = ''.join(chr(x) for x in bytes_ord if x in printable)\n## if not s:\n## s = None\n## return s\n##\n##\n## \n##\n##\n##filename = 'E:\\zshared\\ssView\\d_sg_stratton3d.sgy' #filename\n##fb=open(filename,'rb')\n## _read_ebcdic(fb)\n## _read_binary\n\n\n\n\n","sub_path":"SEGY_read.py","file_name":"SEGY_read.py","file_ext":"py","file_size_in_byte":5639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"178676262","text":"from twisted.internet import protocol\nfrom twisted.internet import reactor\nfrom twisted.internet import task\n\nimport time\n\nserver = None\nclient = None\n\nclass RouterServer(protocol.Protocol):\n def dataReceived(self, data):\n global client\n if client:\n client.transport.write(data)\n\nclass RouterServerFactory(protocol.Factory):\n def buildProtocol(self, addr):\n global server\n server = RouterServer()\n return server\n\nclass RouterClient(protocol.Protocol):\n def dataReceived(self, data):\n global server\n if server:\n server.transport.write(data)\n\nclass RouterClientFactory(protocol.ClientFactory):\n def buildProtocol(self, addr):\n global client\n client = RouterClient()\n return client\n\nreactor.connectTCP(\"localhost\", 8000, RouterClientFactory())\nreactor.listenTCP(8008, RouterServerFactory())\nreactor.run()\n","sub_path":"tests/ipc/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"592653714","text":"import os, sys\nimport numpy as np \nimport matplotlib.pyplot as plt \nimport cv2 \n\n# Load Dataset\ndef load_subset(subsets, base_path='data/yaleBfaces'):\n data = []\n label = []\n \n for subset in subsets:\n directory = os.path.join(base_path, \"subset\" + str(subset))\n files = os.listdir(directory)\n for img in files:\n face = cv2.imread(os.path.join(directory,img), cv2.IMREAD_GRAYSCALE)\n data.append(face)\n label.append(int(img.split('person')[1].split('_')[0]))\n return data, label\n\n# Draw faces\ndef draw_faces(img_list, col=10):\n fig = plt.figure(figsize = (30,30))\n if len(img_list) < col:\n col = len(img_list)\n row = 1\n else:\n row = int(len(img_list)/col)\n for sub_img in range(1,row*col+1):\n ax = fig.add_subplot(row, col, sub_img)\n ax.imshow(img_list[sub_img-1], cmap='gray')\n ax.axis('off')\n plt.show()\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"5705694","text":"import dataRetrival\nimport 
webScraper\nimport excel\nimport anomaly as an\n\ndef main(months, var):\n\tcurrentPrice = dataRetrival.getCurrentPrice()\n\tcurrentPrice = float(currentPrice)\n\tpastPricesX, pastPricesY = dataRetrival.getPastPrices(months)\n\tpreviousPrice = float(pastPricesY[len(pastPricesY) - 1])\n\tlinearVars = dataRetrival.linearRegression(pastPricesX, pastPricesY)\n\tdegree = dataRetrival.crossValidationDegree(pastPricesX, pastPricesY)\n\tpolyVars = dataRetrival.polynomialRegression(pastPricesX, pastPricesY, degree)\n\tanomaly = an.checkAnomaly(linearVars, polyVars, len(pastPricesX) + 1, currentPrice, var) #low\n\theadline = an.createHeadline(anomaly, currentPrice, previousPrice)\n\textraInfo = webScraper.extraBTCInfo(2)[0]\n\tpChange = int(((currentPrice - previousPrice) / (previousPrice)) * 100)\n\t#excel.WB(currentPrice, anomaly, headline, str(pChange) + \"%\", extraInfo)\n\t\nmain(3, 0)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"453202311","text":"import os\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtWebKit import QWebSettings\nfrom PyQt5.QtWebKitWidgets import QWebView, QWebPage\n#from jinja2.loaders import FileSystemLoader\nfrom winterstone.snowflake import getFileContent, CWD, VAULT\nfrom winterstone.base import WinterObject\nimport re\nfrom winterstone.base import Borg\nfrom urllib.request import urlopen\nfrom winterstone.baseQt import API, WinterQtApp\nfrom winterstone.extraQt import WinterSearch\n\ntry:\n from bottle import *\n\n TEMPLATE_PATH.append(CWD + 'templates/')\n BOTTLE_SUPPORT = True\nexcept:\n BOTTLE_SUPPORT = False\n\n def route(*args):\n pass\n\ntry:\n from jinja2 import Environment, PackageLoader\n\n JINJA2_SUPPORT = True\nexcept ImportError:\n JINJA2_SUPPORT = False\n# print('WARNING: JINJA2_SUPPORT disabled')\n\nclass EtherServer(QThread):\n def __init__(self, port=8080):\n QThread.__init__(self)\n self.port = port\n self.quiet = False\n debug(True)\n\n def run(self):\n run(host='localhost', port=self.port, quiet=self.quiet)\n\n\nclass EtherIntegration(Borg):\n def __init__(self, parent='', UI=False):\n Borg.__init__(self)\n self.ui = UI\n if not hasattr(self, 'parent') and parent:\n self.parent = parent\n if self.ui:\n self.server = EtherServer(port=4801)\n self.server.start()\n\n def showGreeting(self):\n self.parent.statusBar.showMessage(urlopen('http://api.averr.in/greeting').read())\n\n def getWebView(self, url='', toolbar=False, debug=False):\n if not self.ui:\n view = EtherWebView(inspect=True)\n else:\n view = EtherWebUI()\n if url:\n view.load(EtherUrl(url))\n\n frame = QWidget()\n frame.view = view\n lay = QVBoxLayout()\n if toolbar:\n frame.tb = QToolBar(frame)\n lay.addWidget(frame.tb)\n frame.tb.addWidget(QWidget().setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))\n frame.tb.addWidget(QLabel('Search: '))\n frame.tb.addWidget(WinterSearch(view))\n lay.addWidget(view)\n frame.setLayout(lay)\n\n if debug:\n view.page().settings().setAttribute(QWebSettings.DeveloperExtrasEnabled, True)\n\n return frame\n\n\nclass EtherWebView(QWebView):\n def __init__(self, inspect=False):\n QWebView.__init__(self)\n self.wi = EtherIntegration()\n self.hp = EtherUrl()\n self.api = API()\n self.page().setLinkDelegationPolicy(QWebPage.DelegateAllLinks)\n self.linkClicked.connect(self.lc)\n self.titleChanged.connect(self.tc)\n 
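# high-quality render hints plus the font and background colours from the app config\n        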
self.setRenderHint(QPainter.HighQualityAntialiasing)\n self.setRenderHint(QPainter.SmoothPixmapTransform)\n self.setRenderHint(QPainter.Antialiasing)\n settings = self.page().settings()\n settings.setFontFamily(QWebSettings.StandardFont, self.wi.parent.config.options.webview.main_font)\n settings.setFontSize(QWebSettings.DefaultFontSize, 24)\n if inspect:\n self.page().settings().setAttribute(QWebSettings.DeveloperExtrasEnabled, True)\n palette = QPalette()\n palette.setBrush(QPalette.Base, QBrush(QColor(self.wi.parent.config.options.webview.bg_color)))\n self.page().setPalette(palette)\n\n\n def tc(self, text):\n print(text)\n if text:\n self.lc(QUrl(text))\n\n def lc(self, link):\n print(link)\n if link.scheme() == 'winter':\n args = str(link.path())[1:].split('/')\n method = str(link.authority())\n try:\n module, method = method.split('.')\n except ValueError:\n module = 'main'\n try:\n if args[0]:\n self.emit('exec',method, module,*args)\n else:\n self.emit('exec',method, module)\n self.wi.parent.debug('Execute: %s(%s) [%s]' % (method, args, module))\n except Exception as e:\n self.api.error(e)\n# elif link.authority() not in ['#', '','localhost:4801']:\n elif link.authority() not in ['#', '']:\n self.wi.parent.debug('GoTo: [%s] %s%s' % (link.scheme(), link.authority(), link.path()))\n self.setUrl(link)\n else:\n pass\n\n\n def loadHomePage(self):\n self.load(self.hp)\n\n\n def setHomePage(self, link):\n print(link)\n self.hp = EtherUrl(link)\n\n def show(self, item):\n self.setUrl(EtherUrl(item.url))\n self.wi.parent.setTitle(item.name)\n\n def cd(self, path):\n pass\n\n def WFind(self, text):\n # self.findText('', QWebPage.HighlightAllOccurrences)\n self.q = text\n return self.findText(text)\n\n def WFindNext(self):\n res = self.findText(self.q)\n if res:\n return res\n else:\n self.onEmptyFind()\n return res\n\n def WFindPrev(self):\n res = self.findText(self.q, QWebPage.FindBackward)\n if res:\n return res\n else:\n self.onEmptyFind(reverse=True)\n return res\n\n def js(self, line):\n self.page().currentFrame().evaluateJavaScript(line)\n\n def onEmptyFind(self, reverse=False):\n self.api.showMessage('No string found')\n\n\nclass EtherUrl(QUrl):\n def __init__(self, link=''):\n if link and link[0] == '~':\n link = os.path.expanduser(link)\n if os.path.isfile(link):\n link = 'file://' + link\n QUrl.__init__(self, link)\n\n\nclass EtherWebUI(EtherWebView):\n def __init__(self, *args, **kwargs):\n EtherWebView.__init__(self, *args, **kwargs)\n\n\n # def load(self,url):\n # todo: implement template dirs\n # self.loadPage(os.path.basename(str(url.path())))\n\n def loadPage(self, url, **kwargs):\n # html = template(template_name, STATIC=CWD+'static/', **kwargs)\n # self.setContent(html, \"text/html\", QUrl('file://%s' % CWD))\n self.setUrl(QUrl('http://localhost:4801' + url))\n","sub_path":"Garden/etherstone/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"603300507","text":"import copy\n\nT = int(input(\"\"))\nB = 0\nM = 0\n\ndef paths(graph):\n path_count = {B:1}\n for i in range(B-1,0,-1): path_count[i] = sum(path_count[j] for j in range(i+1,B+1) if j in graph[i])\n return path_count[1]\n\ndef reachable(graph, initial, final):\n seen = set()\n stack = [initial]\n\n while (len(stack) > 0):\n next = stack.pop()\n if (next == final): return True\n seen.add(next)\n for i in graph[next]:\n if i not in seen: stack.append(i)\n\n return False\n\ndef find_answer(graph, 
target, current_node, to_remove):\n #print(graph, paths(graph), target)\n if (current_node == -1): return None\n if (to_remove <= current_node): return find_answer(graph, target, current_node-1, B)\n\n current_paths = paths(graph)\n #print(graph)\n #print(current_paths, target, current_node, to_remove)\n\n if current_paths == target: return graph\n if current_paths < target: return None\n\n # Removing edges will not decrease number of paths, since the target must be reachable\n if (not reachable(graph, to_remove, B)): return find_answer(graph, target, current_node, to_remove-1)\n\n # Either remove or do not remove\n new_graph = copy.deepcopy(graph)\n new_graph[current_node].remove(to_remove)\n\n answer1 = find_answer(new_graph, target, current_node, to_remove-1)\n if (answer1 != None): return answer1\n else: return find_answer(graph, target, current_node, to_remove - 1)\n\nfor a in range(1,T+1):\n (B,M) = (int(x) for x in input(\"\").split(\" \"))\n\n if (M > 2**(B-2)): print((\"Case #%d: IMPOSSIBLE\") % a)\n else:\n # Remove all unneeded vertices\n lower_bound = 0\n for b in range(B-2,-1,-1):\n if (2**b <= M):\n lower_bound = b + 3\n break\n# lower_bound = B+1\n\n graph = {i : set(j for j in range(i+1,B+1) if (j == 1 or j > B - lower_bound + 1)) if (i == 1 or i > B - lower_bound + 1) else {} for i in range(1,B+1)}\n\n answer = find_answer(graph, M, B-1, B)\n if (answer == None): print(\"ERROR\")\n\n else:\n print(\"Case #%d: POSSIBLE\" % a)\n for i in range(1,B+1): print(\"\".join([\"1\" if j in answer[i] else \"0\" for j in range(1,B+1)]))","sub_path":"solutions_5744014401732608_0/Python/JDJake/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"379639054","text":"# https://dmoj.ca/problem/ccc10s1\r\n# works for all testcases except testcase 1 and 9\r\n\r\ncases = int(input())\r\n\r\nnames = []\r\nscores = []\r\nbromine = True\r\nif cases == 0:\r\n bromine = False\r\n\r\nwhile bromine:\r\n for i in range(cases):\r\n name, ram, cpu, storage = input().split()\r\n names.append(name)\r\n score = int(ram) * 2 + int(cpu) * 3 + int(storage)\r\n scores.append(score)\r\n\r\n\r\n haram = max(scores)\r\n new = []\r\n\r\n for i in scores:\r\n new.append(i)\r\n\r\n new.remove(max(new))\r\n unharam = max(new) # second biggest score\r\n greatest = []\r\n\r\n\r\n counter = 0\r\n for i in range(len(scores)):\r\n if scores[i] == haram:\r\n greatest.append(counter)\r\n counter += 1\r\n\r\n counter = 0\r\n\r\n matrix = []\r\n if haram == unharam: # if the biggest numbers are the same\r\n alphabet = list(\"abcdefghijklmnopqrstuvwxyz\")\r\n bruh = names[greatest[0]].lower()\r\n for i in range(2):\r\n yes = list(bruh)\r\n inds = []\r\n for letter in yes:\r\n ind = alphabet.index(letter)\r\n inds.append(ind)\r\n matrix.append(inds)\r\n bruh = names[greatest[1]].lower()\r\n matrix.sort()\r\n yellow = []\r\n for i in matrix:\r\n zucc = []\r\n for e in i:\r\n zucc.append(alphabet[e])\r\n yellow.append(zucc)\r\n for i in yellow:\r\n print(''.join(i).upper())\r\n else: # if they are different\r\n print(names[scores.index(haram)])\r\n print(names[scores.index(unharam)])\r\n break\r\n\r\n'''\r\n4\r\nABC 13 22 1\r\nDEFK 20 20 20\r\nGHI 11 2 2\r\nJKL 20 20 20\r\n'''\r\n","sub_path":"CCC/CCC 10 S1 Computer Purchase.py","file_name":"CCC 10 S1 Computer Purchase.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} 
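The Code Jam record above counts the 1 -> B routes with a single backward pass over a DAG whose edges always go from a lower to a higher node id (see `paths()` and the `2**(B-2)` bound it checks `M` against). A minimal standalone sketch of that recurrence, using a hypothetical demo graph chosen only so the count is easy to verify by hand:

def count_paths(graph, start, goal):
    # graph maps each node to the set of strictly higher-numbered nodes it
    # points at, so walking from goal down to start sees every successor first
    count = {goal: 1}
    for node in range(goal - 1, start - 1, -1):
        count[node] = sum(count[nxt] for nxt in graph.get(node, ()))
    return count[start]

# complete "upward" graph on 4 nodes: 2**(4-2) == 4 routes from 1 to 4,
# the same upper bound the solution uses to rule out impossible cases
demo = {1: {2, 3, 4}, 2: {3, 4}, 3: {4}}
assert count_paths(demo, 1, 4) == 4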
+{"seq_id":"649881508","text":"from ..independence import CCA, HHG, KMERF, MGC, RV, Dcorr, Hsic\nfrom ._utils import _CheckInputs, k_sample_transform\nfrom .base import KSampleTest\n\n\nclass KSample(KSampleTest):\n r\"\"\"\n Class for calculating the *k*-sample test statistic and p-value.\n\n A *k*-sample test tests equality in distribution among groups. Groups\n can be of different sizes, but generally have the same dimensionality.\n There are not many non-parametric *k*-sample tests, but this version\n cleverly leverages the power of some of the implemented independence\n tests to test this equality of distribution.\n\n Parameters\n ----------\n indep_test : {\"CCA\", \"Dcorr\", \"HHG\", \"RV\", \"Hsic\", \"MGC\"}\n A string corresponding to the desired independence test from\n ``mgc.independence``. This is not case sensitive.\n compute_distance : callable(), optional (default: \"euclidean\")\n A function that computes the distance among the samples within each\n data matrix.\n Valid strings for ``metric`` are, as defined in\n ``sklearn.metrics.pairwise_distances``,\n\n - From scikit-learn: [‘cityblock’, ‘cosine’, ‘euclidean’, ‘l1’, ‘l2’,\n ‘manhattan’] See the documentation for scipy.spatial.distance for details\n on these metrics.\n - From scipy.spatial.distance: [‘braycurtis’, ‘canberra’, ‘chebyshev’,\n ‘correlation’, ‘dice’, ‘hamming’, ‘jaccard’, ‘kulsinski’, ‘mahalanobis’,\n ‘minkowski’, ‘rogerstanimoto’, ‘russellrao’, ‘seuclidean’,\n ‘sokalmichener’, ‘sokalsneath’, ‘sqeuclidean’, ‘yule’] See the\n documentation for scipy.spatial.distance for details on these metrics.\n\n Set to `None` or `precomputed` if `x` and `y` are already distance\n matrices. To call a custom function, either create the distance matrix\n before-hand or create a function of the form ``metric(x, **kwargs)``\n where `x` is the data matrix for which pairwise distances are\n calculated and kwargs are extra arguements to send to your custom function.\n bias : bool (default: False)\n Whether or not to use the biased or unbiased test statistics. Only\n applies to ``Dcorr`` and ``Hsic``.\n\n Notes\n -----\n The formulation for this implementation is as follows [#1Ksamp]_:\n\n The *k*-sample testing problem can be thought of as a generalization of\n the two sample testing problem. Define\n :math:`\\{ u_i \\stackrel{iid}{\\sim} F_U,\\ i = 1, ..., n \\}` and\n :math:`\\{ v_j \\stackrel{iid}{\\sim} F_V,\\ j = 1, ..., m \\}` as two groups\n of samples deriving from different distributions with the same\n dimensionality. Then, problem that we are testing is thus,\n\n .. math::\n\n H_0: F_U &= F_V \\\\\n H_A: F_U &\\neq F_V\n\n The closely related independence testing problem can be generalized\n similarly: Given a set of paired data\n :math:`\\{\\left(x_i, y_i \\right) \\stackrel{iid}{\\sim} F_{XY}, \\ i = 1, ..., N\\}`,\n the problem that we are testing is,\n\n .. math::\n\n H_0: F_{XY} &= F_X F_Y \\\\\n H_A: F_{XY} &\\neq F_X F_Y\n\n By manipulating the inputs of the *k*-sample test, we can create\n concatenated versions of the inputs and another label matrix which are\n necessarily paired. Then, any nonparametric test can be performed on\n this data.\n\n Letting :math:`n = \\sum_{i=1}^k n_i`, define new data matrices\n :math:`\\mathbf{x}` and :math:`\\mathbf{y}` such that,\n\n .. 
math::\n\n \\begin{align*}\n \\mathbf{x} &=\n \\begin{bmatrix}\n \\mathbf{u}_1 \\\\\n \\vdots \\\\\n \\mathbf{u}_k\n \\end{bmatrix} \\in \\mathbb{R}^{n \\times p} \\\\\n \\mathbf{y} &=\n \\begin{bmatrix}\n \\mathbf{1}_{n_1 \\times 1} & \\mathbf{0}_{n_1 \\times 1}\n & \\ldots & \\mathbf{0}_{n_1 \\times 1} \\\\\n \\mathbf{0}_{n_2 \\times 1} & \\mathbf{1}_{n_2 \\times 1}\n & \\ldots & \\mathbf{0}_{n_2 \\times 1} \\\\\n \\vdots & \\vdots & \\ddots & \\vdots \\\\\n \\mathbf{0}_{n_k \\times 1} & \\mathbf{0}_{n_k \\times 1}\n & \\ldots & \\mathbf{1}_{n_k \\times 1} \\\\\n \\end{bmatrix} \\in \\mathbb{R}^{n \\times k}\n \\end{align*}\n\n Additionally, in the two-sample case,\n\n .. math::\n\n \\begin{align*}\n \\mathbf{x} &=\n \\begin{bmatrix}\n \\mathbf{u}_1 \\\\\n \\mathbf{u}_2\n \\end{bmatrix} \\in \\mathbb{R}^{n \\times p} \\\\\n \\mathbf{y} &=\n \\begin{bmatrix}\n \\mathbf{0}_{n_1 \\times 1} \\\\\n \\mathbf{1}_{n_2 \\times 1}\n \\end{bmatrix} \\in \\mathbb{R}^n\n \\end{align*}\n\n Given :math:`\\mathbf{u}` and :math:`\\mathbf{v}`$` as defined above,\n to perform a :math:`w`-way test where :math:`w < k`,\n\n .. math::\n\n \\mathbf{y} =\n \\begin{bmatrix}\n \\mathbf{1}_{n_1 \\times 1} & \\mathbf{0}_{n_1 \\times 1}\n & \\ldots & \\mathbf{1}_{n_1 \\times 1} \\\\\n \\mathbf{1}_{n_2 \\times 1} & \\mathbf{1}_{n_2 \\times 1}\n & \\ldots & \\mathbf{0}_{n_2 \\times 1} \\\\\n \\vdots & \\vdots & \\ddots & \\vdots \\\\\n \\mathbf{0}_{n_k \\times 1} & \\mathbf{1}_{n_k \\times 1}\n & \\ldots & \\mathbf{1}_{n_k \\times 1} \\\\\n \\end{bmatrix} \\in \\mathbb{R}^{n \\times k}.\n\n where each row of :math:`\\mathbf{y}` contains :math:`w`\n :math:`\\mathbf{1}_{n_i}` elements. This leads to label matrix distances\n proportional to how many labels (ways) samples differ by, a hierarchy of distances\n between samples thought to be true if the null hypothesis is rejected.\n\n Performing a multilevel test involves constructing :math:`x` and :math:`y` using\n either of the methods above and then performing a block permutation [#2Ksamp]_.\n Essentially, the permutation is striated, where permutation is limited to be within\n a block of samples or between blocks of samples, but not both. This is done because\n the data is not freely exchangeable, so it is necessary to block the permutation to\n preserve the joint distribution [#2Ksamp]_.\n\n The p-value returned is calculated using a permutation test using a\n `permutation test `_.\n The fast version of the test (for :math:`k`-sample Dcorr and Hsic) uses a\n `chi squared approximation `_.\n\n References\n ----------\n .. [#1Ksamp] Panda, S., Shen, C., Perry, R., Zorn, J., Lutz, A., Priebe, C. E., &\n Vogelstein, J. T. (2019). Nonparametric MANOVA via Independence\n Testing. arXiv e-prints, arXiv-1910.\n .. [#2Ksamp] Winkler, A. M., Webster, M. A., Vidaurre, D., Nichols, T. E., &\n Smith, S. M. (2015). Multi-level block permutation. 
Neuroimage, 123,\n 253-268.\n \"\"\"\n\n def __init__(self, indep_test, compute_distance=\"euclidean\", bias=False, **kwargs):\n indep_test = indep_test.lower()\n test_names = {\n \"rv\": RV,\n \"cca\": CCA,\n \"hhg\": HHG,\n \"hsic\": Hsic,\n \"dcorr\": Dcorr,\n \"mgc\": MGC,\n \"kmerf\": KMERF,\n }\n if indep_test not in test_names.keys():\n raise ValueError(\"Test is not a valid independence test\")\n if indep_test == \"hsic\" and compute_distance == \"euclidean\":\n compute_distance = \"gaussian\"\n self.indep_test_name = indep_test\n indep_test = test_names[indep_test]\n\n if self.indep_test_name in [\"dcorr\", \"hhg\", \"hsic\", \"mgc\"]:\n if self.indep_test_name == \"hsic\":\n self.indep_test = indep_test(\n compute_kernel=compute_distance, bias=bias, **kwargs\n )\n elif self.indep_test_name == \"dcorr\":\n self.indep_test = indep_test(\n compute_distance=compute_distance, bias=bias, **kwargs\n )\n else:\n self.indep_test = indep_test(\n compute_distance=compute_distance, **kwargs\n )\n elif self.indep_test_name == \"kmerf\":\n self.indep_test = indep_test(forest_type=\"classifier\", **kwargs)\n else:\n self.indep_test = indep_test()\n\n # set is_distance to true if compute_distance is None\n self.is_distance = False\n if not compute_distance:\n self.is_distance = True\n\n KSampleTest.__init__(\n self, compute_distance=compute_distance, bias=bias, **kwargs\n )\n\n def _statistic(self, *args):\n r\"\"\"\n Calulates the *k*-sample test statistic.\n\n Parameters\n ----------\n *args : ndarrays\n Variable length input data matrices. All inputs must have the same\n number of samples. That is, the shapes must be `(n, p)` and\n `(m, p)` where `n` and `m` are the number of samples and `p` are\n the number of dimensions. Alternatively, inputs can be distance\n matrices, where the shapes must all be `(n, n)`.\n \"\"\"\n inputs = list(args)\n if self.indep_test_name == \"kmerf\":\n u, v = k_sample_transform(inputs, test_type=\"rf\")\n else:\n u, v = k_sample_transform(inputs)\n\n return self.indep_test._statistic(u, v)\n\n def test(self, *args, reps=1000, workers=1, auto=True):\n r\"\"\"\n Calculates the *k*-sample test statistic and p-value.\n\n Parameters\n ----------\n *args : ndarrays\n Variable length input data matrices. All inputs must have the same\n number of samples. That is, the shapes must be `(n, p)` and\n `(m, p)` where `n` and `m` are the number of samples and `p` are\n the number of dimensions. Alternatively, inputs can be distance\n matrices, where the shapes must all be `(n, n)`.\n reps : int, optional (default: 1000)\n The number of replications used to estimate the null distribution\n when using the permutation test used to calculate the p-value.\n workers : int, optional (default: 1)\n The number of cores to parallelize the p-value computation over.\n Supply -1 to use all cores available to the Process.\n auto : bool (default: True)\n Automatically uses fast approximation when sample size and size of array\n is greater than 20. If True, and sample size is greater than 20, a fast\n chi2 approximation will be run. Parameters ``reps`` and ``workers`` are\n irrelevant in this case. 
Only applies to ``Dcorr`` and ``Hsic``.\n\n Returns\n -------\n stat : float\n The computed *k*-Sample statistic.\n pvalue : float\n The computed *k*-Sample p-value.\n\n Examples\n --------\n >>> import numpy as np\n >>> from hyppo.ksample import KSample\n >>> x = np.arange(7)\n >>> y = x\n >>> z = np.arange(10)\n >>> stat, pvalue = KSample(\"Dcorr\").test(x, y)\n >>> '%.3f, %.1f' % (stat, pvalue)\n '-0.136, 1.0'\n \"\"\"\n inputs = list(args)\n check_input = _CheckInputs(\n inputs=inputs,\n indep_test=self.indep_test_name,\n )\n inputs = check_input()\n if self.indep_test_name == \"kmerf\":\n u, v = k_sample_transform(inputs, test_type=\"rf\")\n else:\n u, v = k_sample_transform(inputs)\n\n kwargs = {}\n if self.indep_test_name in [\"dcorr\", \"hsic\"]:\n kwargs = {\"auto\": auto}\n\n return self.indep_test.test(u, v, reps, workers, **kwargs)\n","sub_path":"hyppo/ksample/ksamp.py","file_name":"ksamp.py","file_ext":"py","file_size_in_byte":11754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"133393600","text":"import sys\nfrom JMTucker.Tools.BasicAnalyzer_cfg import cms, process\n\nprocess.TFileService.fileName = 'which_gen.root'\nprocess.source.fileNames = ['/store/user/tucker/TTJets_HadronicMGDecays_8TeV-madgraph/mfvntuple_v18/c761ddfa7f093d8f86a338439e06a1d4/ntuple_100_1_NHs.root']\nprocess.source.secondaryFileNames = cms.untracked.vstring(*'''/store/mc/Summer12_DR53X/TTJets_HadronicMGDecays_8TeV-madgraph/AODSIM/PU_S10_START53_V7A-v1/00000/0E9DCF01-0216-E211-934F-20CF3019DF0F.root\n/store/mc/Summer12_DR53X/TTJets_HadronicMGDecays_8TeV-madgraph/AODSIM/PU_S10_START53_V7A-v1/00000/E0F2FA59-1016-E211-A573-00259073E3A8.root\n/store/mc/Summer12_DR53X/TTJets_HadronicMGDecays_8TeV-madgraph/AODSIM/PU_S10_START53_V7A-v1/00000/5205EEA5-1016-E211-BBE1-90E6BA442F2B.root'''.split('\\n'))\n\nprocess.load('JMTucker.MFVNeutralino.VertexSelector_cfi')\nprocess.load('JMTucker.MFVNeutralino.AnalysisCuts_cfi')\nprocess.mfvAnalysisCutsOneVtx = process.mfvAnalysisCuts.clone(min_nvertex = 1)\n\nprocess.WhichGenParticleOneVtx = cms.EDAnalyzer('MFVWhichGenParticle',\n gen_particles_src = cms.InputTag('genParticles'),\n mevent_src = cms.InputTag('mfvEvent'),\n vertices_src = cms.InputTag('mfvSelectedVerticesTight'),\n )\nprocess.WhichGenParticleTwoVtx = process.WhichGenParticleOneVtx.clone()\n\nprocess.p = cms.Path(process.mfvSelectedVerticesSeq * process.mfvAnalysisCutsOneVtx * process.WhichGenParticleOneVtx * process.mfvAnalysisCuts * process.WhichGenParticleTwoVtx)\n\n\nif __name__ == '__main__' and hasattr(sys, 'argv') and 'submit' in sys.argv:\n import JMTucker.Tools.Samples as Samples\n samples = Samples.from_argv(Samples.ttbar_samples + Samples.qcd_samples)\n\n from JMTucker.Tools.CRABSubmitter import CRABSubmitter\n cs = CRABSubmitter('WhichGenParticle',\n job_control_from_sample = True,\n use_ana_dataset = True,\n use_parent = True,\n run_half_mc = True,\n )\n cs.submit_all(samples)\n","sub_path":"MFVNeutralino/test/which_gen.py","file_name":"which_gen.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"487744551","text":"import cv2\r\nimport numpy as np\r\nfrom PIL import Image, ImageFilter, ImageEnhance\r\nfilename = 'vison.jpg'\r\nimage = Image.open(filename)\r\nsize = width, height = image.size\r\n\r\ninput_image = cv2.imread('vison.jpg', cv2.IMREAD_COLOR)\r\nkernel = np.ones((5,5), np.uint8)\r\nerosion_image = cv2.erode(input_image, kernel, 
iterations=1)\r\ncv2.imshow('Input', input_image)\r\ncv2.imshow('Erosion', erosion_image)\r\n\r\ncv2.waitKey(0)\r\n\r\n","sub_path":"src/Erosion.py","file_name":"Erosion.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"563557239","text":"__author__ = 'Martin Othamar'\n\nimport logging\nimport logging.config\nimport sys\nimport socket\nimport thread\nimport threading\nimport json\nimport time\nfrom server import Server\nfrom ..config import logging_config\nlogger = logging_config.get_logger(\"ServerManager\")\n\n'''\n The ServerManager\n This is the core of the TVSchedule server application\n It takes input (events) from servers and clients, and handles them accordingly\n'''\n\nclass ServerManager:\n\n # Run server and client listener threads\n def __init__(self):\n # {i : {\"host\": localhost, \"port\": 0000, \"clients\": 0}}\n self.active_servers = {}\n self.server_listener = threading.Thread(target=self.listen_for_servers, args=())\n self.client_listener = threading.Thread(target=self.listen_for_clients, args=())\n self.server_listener.start()\n self.client_listener.start()\n\n\n # Run in a thread for listening to server dropouts\n # TODO detect server dropouts, so that they can be removed from active_servers\n def listen_for_server_dropouts(self):\n while True:\n HOST = '' # Symbolic name meaning all available interfaces\n PORT = 7999 # Arbitrary non-privileged port\n\n self.server_dropouts_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server_dropouts_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n # Bind socket to local host and port\n try:\n self.server_dropouts_socket.bind((HOST, PORT))\n except socket.error as error:\n logging.critical('ServerManager.listen_for_server_dropouts: Bind failed. Error Code : ' + str(error[0]) + ' Message ' + error[1])\n sys.exit() # Might as well quit..\n\n # Start listening on socket\n self.server_dropouts_socket.listen(250)\n\n # I'm running the server socket, and creating threads for each accepting client\n while 1:\n connection, address = self.server_dropouts_socket.accept()\n thread.start_new_thread(self.unregister_server,(connection, address,))\n\n self.server_dropouts_socket.close()\n logger.debug('ServerManager.listen_for_server_dropouts: shut down, trying to restart the listener')\n\n\n # Run in a thread for listening to newly created servers, which then register through this socket\n def listen_for_servers(self):\n while True:\n HOST = '' # Symbolic name meaning all available interfaces\n PORT = 7998 # Arbitrary non-privileged port\n\n self.server_listener_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server_listener_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n # Bind socket to local host and port\n try:\n self.server_listener_socket.bind((HOST, PORT))\n except socket.error as error:\n logging.critical('ServerManager.listen_for_servers: Bind failed. 
Error Code : ' + str(error[0]) + ' Message ' + error[1])\n sys.exit() # Might as well quit..\n\n # Start listening on socket\n self.server_listener_socket.listen(250)\n\n # I'm running the server socket, and creating threads for each accepting client\n while 1:\n connection, address = self.server_listener_socket.accept()\n thread.start_new_thread(self.register_server,(connection, address,))\n\n self.server_listener_socket.close()\n logger.debug('ServerManager.listen_for_servers: shut down, trying to restart the listener')\n\n\n # Run in a thread for listening to new clients, assigning them to active servers\n def listen_for_clients(self):\n while True:\n HOST = '' # Symbolic name meaning all available interfaces\n PORT = 8000 # Arbitrary non-privileged port\n\n\n self.client_listener_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.client_listener_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n # Bind socket to local host and port\n try:\n self.client_listener_socket.bind((HOST, PORT))\n except socket.error as error:\n logging.critical('ServerManager.listen_for_clients: Bind failed. Error Code : ' + str(error[0]) + ' Message ' + error[1])\n sys.exit() # Might as well quit..\n\n # Start listening on socket\n self.client_listener_socket.listen(250)\n\n # I'm running the server socket, and creating threads for each accepting client\n while 1:\n connection, address = self.client_listener_socket.accept()\n thread.start_new_thread(self.assign_client,(connection, address,))\n\n self.client_listener_socket.close()\n logger.debug('ServerManager.listen_for_clients: shut down, trying to restart the listener')\n\n\n # Run in a thread to assign clients to available servers\n def assign_client(self, connection, address):\n try:\n server_info = { \"host\" : \"\", \"port\" : 0 }\n if self.active_servers == None or len(self.active_servers) == 0:\n server_info = {\"exception\" : \"Sorry, no server is currently up\"}\n else:\n low = {'0' : sys.maxint}\n for index, server in self.active_servers.iteritems():\n if server[\"clients\"] < low.itervalues().next():\n low.popitem()\n low[str(index)] = server[\"clients\"]\n i = low.iterkeys().next()\n self.active_servers[i][\"clients\"] += 1\n server_info = self.active_servers[i]\n connection.send(json.dumps(server_info))\n logger.debug(\"ServerManager.assign_client : \" + address[0] + \":\" + str(address[1]) + \" was assigned to \" + \\\n server_info[\"host\"] + \":\" + server_info[\"port\"] + \":\" + str(server_info[\"clients\"]))\n except Exception as exception:\n logger.error(\"ServerManager.assign_client : \" + address[0] + \":\" + str(address[1]) +\n \" could not be assigned, error during assignment occurred : \" + exception.message)\n\n\n # Register servers, which clients later can be assigned to\n def register_server(self, connection, address):\n try:\n # {i : {\"host\": localhost, \"port\": 0000, \"clients\": 0}}\n server_info = connection.recv(1024)\n server_info = json.loads(server_info)\n server_info = {\"host\": server_info[\"host\"], \"port\": server_info[\"port\"], \"clients\": 0}\n reply = {\"success\" : \"Successfully registered server\"}\n for index, server in self.active_servers.iteritems():\n if server[\"host\"] == server_info[\"host\"] and str(server[\"port\"]) == str(server_info[\"port\"]):\n reply = {\"exception\" : \"server already registered\"}\n i = str(len(self.active_servers))\n self.active_servers[i] = server_info\n connection.send(json.dumps(reply))\n if reply.has_key(\"success\"):\n 
logger.debug(\"ServerManager.register_server : registered \" + server_info[\"host\"] + \":\" + str(server_info[\"port\"]))\n elif reply.has_key(\"exception\"):\n logger.error(\"ServerManager.register_server : failed to register server \" + server_info[\"host\"] + \":\" + str(server_info[\"port\"]))\n except Exception as exception:\n logger.error(\"ServerManager.register_server : failed to process registering of server \" + address[0] + \":\" + str(address[1]))\n\n\n # If a server exits, it has to register with the Manager to report it, this is handled here\n # We need to ensure that active_servers data is always updated to maintain efficiency\n def unregister_server(self, connection, address):\n try:\n server_info = connection.recv(1024)\n amount_of_servers = len(self.active_servers)\n for index, server in self.active_servers.iteritems():\n if server[\"host\"] == server_info[\"host\"] and str(server[\"port\"]) == str(server_info[\"port\"]):\n del self.active_servers[index]\n logger.debug(\"ServerManager.unregister_server : unregistered server \" + server_info[\"host\"] + \":\" + str(server_info[\"port\"]))\n if amount_of_servers == len(self.active_servers):\n logger.error(\"ServerManager.unregister_server : failed to unregistered server \" + server_info[\"host\"] + \":\" + str(server_info[\"port\"]))\n except Exception as exception:\n logger.error(\"ServerManager.unregister_server : failed to process unregistering of server \" + address[0] + \":\" + str(address[1]))\n\n\n# Bootstrap\nif __name__ == '__main__':\n application = ServerManager()\n logger.debug('ServerManager: sucessfully up and running. Now listening for servers and clients')\n time.sleep(1)\n default_server1 = threading.Thread(target=Server, args=())\n default_server2 = threading.Thread(target=Server, args=())\n default_server3 = threading.Thread(target=Server, args=())\n default_server1.start()\n time.sleep(1)\n default_server2.start()\n time.sleep(1)\n default_server3.start()\n application.server_listener.join()\n logger.fatal(\"ServerManager server_listener failed\")\n application.client_listener.join()\n logger.fatal(\"ServerManager client_listener failed\")\n del default_server1\n del default_server2\n del default_server3","sub_path":"Python/server/server_manager.py","file_name":"server_manager.py","file_ext":"py","file_size_in_byte":9582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"5242922","text":"import operator\nimport doc_manager as dm\nimport json\n\n\nclass Node(object):\n\n def __init__(self, word, value):\n self.value = value\n self.word = word\n self.parent = None\n self.code = 0\n self.point = 0\n self.points_reverse = []\n self.codes_reverse = []\n\n\nclass Huffman(object):\n\n def __init__(self):\n self.all_words = []\n self.cur_words = []\n self.point = 0\n\n def build_tree(self, all_words):\n sorted_words = sorted(all_words.items(), key=operator.itemgetter(1))\n self.cur_words = list(map(lambda word: Node(word[0], word[1]), sorted_words))\n for word in self.cur_words:\n self.all_words.append(word)\n\n # start build tree\n while len(self.cur_words) > 1:\n if self.point % 1000 == 0:\n print('Iteration: %s' % self.point)\n print('Len of left Words: %s' % len(self.cur_words))\n\n min1 = self.cur_words[0]\n min2 = self.cur_words[1]\n parent = Node('', min1.value + min2.value)\n min1.parent = parent\n min2.parent = parent\n # left branch\n min1.code = 0\n # right branch\n min2.code = 1\n # parameter index of parent\n parent.point = self.point\n 
self.point += 1\n\n # remove old words\n self.cur_words.remove(min1)\n self.cur_words.remove(min2)\n\n # insert new words\n added = 0\n for i in range(len(self.cur_words)):\n if parent.value < self.cur_words[i].value:\n self.cur_words.insert(i, parent)\n added = 1\n break\n if added == 0:\n self.cur_words.append(parent)\n\n def calculate_points_codes(self):\n for word in self.all_words:\n cur = word\n while cur.parent:\n word.points_reverse.append(cur.parent.point)\n word.codes_reverse.append(cur.code)\n cur = cur.parent\n\n\ndef get_node_data(node):\n return {'points': node.points_reverse[::-1], 'codes': node.codes_reverse[::-1]}\n\n\ndef test():\n all_words, all_docs = dm.handle_raw_data('data/movielen/ml_plot.txt')\n huffman = Huffman()\n huffman.build_tree(all_words)\n huffman.calculate_points_codes()\n huffman_tree = {}\n for node in huffman.all_words:\n huffman_tree[node.word] = get_node_data(node)\n plot = open('data/movielen/handled/tree.txt', 'w')\n plot.write(json.dumps(huffman_tree))\n doc = open('data/movielen/handled/doc.txt', 'w')\n doc.write(json.dumps(all_docs))\n words = open('data/movielen/handled/words.txt', 'w')\n words.write(json.dumps(all_words))\n\n plot.close()\n doc.close()\n words.close()\n\nif __name__ == '__main__':\n test()\n","sub_path":"codes/PMDV/huffman.py","file_name":"huffman.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"156672476","text":"\"\"\"\n domonic.templates\n ====================================\n some builtin templates\n\n\"\"\"\n\nfrom domonic import *\n\n\nclass status_page():\n\n STATUS = {\n 200: \"200 OK\",\n 201: \"201 Created\",\n 202: \"202 Accepted\",\n 203: \"203 Non-Authoritative Information\",\n 204: \"204 No Content\",\n 205: \"205 Reset Content\",\n 206: \"206 Partial Content\",\n 207: \"207 Multi-Status\",\n 208: \"208 Already Reported\",\n 226: \"226 IM Used\",\n 300: \"300 Multiple Choices\",\n 301: \"301 Moved Permanently\",\n 302: \"302 Found\",\n 303: \"303 See Other\",\n 304: \"304 Not Modified\",\n 305: \"305 Use Proxy\",\n 306: \"306 Switch Proxy\",\n 307: \"307 Temporary Redirect\",\n 308: \"308 Permanent Redirect\",\n 400: \"400 Bad Request\",\n 401: \"401 Unauthorized\",\n 402: \"402 Payment Required\",\n 403: \"403 Forbidden\",\n 404: \"404 Not Found\",\n 405: \"405 Method Not Allowed\",\n 406: \"406 Not Acceptable\",\n 407: \"407 Proxy Authentication Required\",\n 408: \"408 Request Timeout\",\n 409: \"409 Conflict\",\n 410: \"410 Gone\",\n 411: \"411 Length Required\",\n 412: \"412 Precondition Failed\",\n 413: \"413 Payload Too Large\",\n 414: \"414 URI Too Long\",\n 415: \"415 Unsupported Media Type\",\n 416: \"416 Range Not Satisfiable\",\n 417: \"417 Expectation Failed\",\n 418: \"418 I'm a teapot\",\n 421: \"421 Misdirected Request\",\n 422: \"422 Unprocessable Entity\",\n 423: \"423 Locked\",\n 424: \"424 Failed Dependency\",\n 426: \"426 Upgrade Required\",\n 428: \"428 Precondition Required\",\n 429: \"429 Too Many Requests\",\n 431: \"431 Request Header Fields Too Large\",\n 451: \"451 Unavailable For Legal Reasons\",\n 499: \"499 Client Closed Request\",\n 500: \"500 Internal Server Error\",\n 501: \"501 Not Implemented\",\n 502: \"502 Bad Gateway\",\n 503: \"503 Service Unavailable\",\n 504: \"504 Gateway Timeout\",\n 505: \"505 HTTP Version Not Supported\",\n 506: \"506 Variant Also Negotiates\",\n 507: \"507 Insufficient Storage\",\n 508: \"508 Loop Detected\",\n 510: \"510 Not Extended\",\n 511: \"511 
Network Authentication Required\",\n\n }\n\n DEBUG_MODE = False\n\n def __init__(self, code=404, wholepage=True):\n self.status_code = code\n self.message = self.STATUS[code]\n # self.host = \"http://localhost:8000\"\n self.wholepage = wholepage\n\n self.status_node = div(_id=\"status\")\n self.status_node += h1(f\"{self.status_code}\")\n self.status_node += p(self.message)\n\n # if status_page.DEBUG_MODE:\n # import os\n # self.status_node.appendChild(\n # pre(os.environ)\n # )\n # import sys\n # self.status_node.appendChild(\n # pre(sys.path)\n # )\n # import traceback\n # self.status_node.appendChild(\n # pre(traceback.format_exc())\n # )\n\n page = html(\n head(title(f\"{self.status_code}\")),\n body(self.status_node)\n )\n self.content = self.wholepage and page or self.status_node\n\n # def __getattr__(self, name):\n # return getattr(self.status_node, name)\n\n # def __setattr__(self, name, value):\n # setattr(self.status_node, name, value)\n\n def __str__(self):\n return f'{self.content}'\n\n # @staticmethod\n # def to_html(output_dir):\n # for code, name in status_page.STATUS.items():\n # page = status_page(code, name)\n # with open(f\"{output_dir}/{code}.html\", \"w\") as f:\n # f.write(f'{page}')\n\n\n'''\nclass d_b():\n def __init__(self):\n pass\n def config(self, db_name, db_user, db_pass, db_host, db_port):\n self.db_name = db_name\n self.db_user = db_user\n self.db_pass = db_pass\n self.db_host = db_host\n self.db_port = db_port\n # def connect(self):\n # def query(self, query):\n # def close(self):\n'''\n\n\n# from domonic.templates import status_page\n\n# @app.errorhandler(404)\n# def page_not_found(e):\n# return status_page(404)\n","sub_path":"domonic/templates/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"343109807","text":"import random\nimport math\nimport pygame\nimport Cell\nfrom random import randint\nfrom ConfigParser import SafeConfigParser\n\nclass MountainFactory(object):\n\n\tdef __init__(self, model, configManager):\n\t\tself.model = model\n\t\tself.configManager = configManager\n\n\t\trandom.seed(configManager.WORLDGENSEED)\n\n\t# Add a single mountain range.\n\tdef createMountainRange(self):\n\t\t# Get length of range.\n\t\tactualRangeLength = self.getActualRangeLength()\n\n\t\t# Place the first mountain of the range.\n\t\tlastMountainPlaced = initialMountain = self.placeInitialMountain()\n\n\t\t# Record that one mountain has been placed.\n\t\tmountainsPlaced = 1\n\n\t\t# Get direction mountain range will move in.\n\t\thorizontalOffset, verticalOffset = self.getNewDirection(0,0)\n\n\t\t# Ensures we don't get stuck in this loop forever.\n\t\tacceptableTries = actualRangeLength * 20\n\t\tloops = 0\n\n\t\t# Whilst we still have mountains to place.\n\t\twhile mountainsPlaced < actualRangeLength and loops < acceptableTries:\n\t\t\t# Get new mountain co-ordinates.\n\t\t\tnewMountainCoords = [lastMountainPlaced[0] + horizontalOffset, lastMountainPlaced[1] + verticalOffset]\n\n\t\t\tif not self.model.outOfBounds(newMountainCoords):\n\n\t\t\t\t# Get tile at new mountain co-ordinates.\n\t\t\t\tnewMountainTile = self.model.getCell(newMountainCoords)\n\t\t\t\t# Try to add the mountain. 
If it's successful then set last mountain placed and the amount of mountains placed.\n\t\t\t\tif newMountainTile.land and not newMountainTile.mountain:\n\t\t\t\t\tself.model.placeMountainCell(newMountainCoords)\n\t\t\t\t\tlastMountainPlaced = newMountainCoords\n\t\t\t\t\tmountainsPlaced += 1\n\n\t\t\t\t# Get new direction, if randint(0,100) isn't within correct range, will just return existing direction.\n\t\t\t\thorizontalOffset, verticalOffset = self.getNewDirection(0,0, randint(0,100))\n\n\t\t\t# Increase loop counter.\n\t\t\tloops += 1\n\n\n\tdef getActualRangeLength(self):\n\t\t# Max size of mountain ranges.\n\t\tminRangeLength = int(self.configManager.MOUNTAINLENGTHMIN)\n\t\tmaxRangeLength = int(self.configManager.MOUNTAINLENGTHMAX)\n\n\t\tif minRangeLength >= maxRangeLength:\n\t\t\tactualRangeLength = minRangeLength\n\t\telse:\n\t\t\tactualRangeLength = randint(minRangeLength, maxRangeLength)\n\n\t\treturn actualRangeLength\n\n\tdef placeInitialMountain(self):\n\t\tfirstMountain = [randint(0, self.model.worldCellWidth - 1), randint(0, self.model.worldCellHeight - 1)]\n\t\tfirstMountainTile = self.model.getCell(firstMountain)\n\t\twhile self.model.outOfBounds(firstMountain) and firstMountainTile.land:\n\t\t\tfirstMountain = [randint(0, self.model.worldCellWidth - 1), randint(0, self.model.worldCellHeight - 1)]\n\t\t\tfirstMountainTile = self.model.getCell(firstMountain)\n\n\t\tself.model.placeMountainCell(firstMountain)\n\n\t\treturn firstMountain\n\n\tdef getNewDirection(self, horizontal, vertical, diceRoll = False):\n\t\tdirectionChangeChance = self.configManager.MOUNTAINDIRECTIONCHANGECHANCE\n\n\t\tif diceRoll == False:\n\t\t\tdiceRoll = directionChangeChance + 1\n\t\tif diceRoll <= directionChangeChance:\n\t\t\treturn horizontal, vertical\n\n\t\tnewHorizontal = newVertical = 0\n\n\t\twhile newHorizontal == horizontal and newVertical == vertical or (newHorizontal == 0 and newVertical == 0):\n\t\t\tnewHorizontal = randint(-1,1)\n\t\t\tnewVertical = randint(-1,1)\n\n\t\treturn newHorizontal, newVertical","sub_path":"Model/MountainFactory.py","file_name":"MountainFactory.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"442132709","text":"from uuid import uuid4\nfrom itsim.datastore.datastore import DatastoreRestClient\nfrom itsim.schemas.items import create_json_node, create_json_network_event, create_json_log\nfrom itsim.time import now_iso8601\n\n\ndef test_store_load_node():\n \"\"\"\n Stores/loads a node into each of the supported database tables.\n :return:\n \"\"\"\n sim_uuid = uuid4()\n node_uuid = uuid4()\n timestamp = now_iso8601()\n\n node = create_json_node(sim_uuid=sim_uuid,\n timestamp=timestamp,\n uuid=node_uuid,\n node_label='1')\n\n datastore = DatastoreRestClient(sim_uuid=sim_uuid)\n\n datastore.store_item(node)\n result = datastore.load_item('node', node_uuid)\n assert result.uuid == str(node_uuid)\n\n\ndef test_store_load_network_event():\n \"\"\"\n Stores/loads a network event into each of the supported database tables.\n :return:\n \"\"\"\n sim_uuid = uuid4()\n network_uuid = uuid4()\n\n network_event = create_json_network_event(sim_uuid=sim_uuid,\n timestamp=now_iso8601(),\n uuid=network_uuid,\n uuid_node=uuid4(),\n network_event_type='open',\n protocol='UDP',\n pid=32145,\n src=['192.168.1.1', 64],\n dst=['192.168.11.200', 72])\n\n datastore = DatastoreRestClient(sim_uuid=sim_uuid)\n\n datastore.store_item(network_event)\n result = 
datastore.load_item('network_event', network_uuid)\n assert result.uuid == str(network_uuid)\n\n\ndef test_store_load_log():\n \"\"\"\n Stores/loads a log into each of the supported database tables.\n :return:\n \"\"\"\n sim_uuid = uuid4()\n log_uuid = uuid4()\n\n log = create_json_log(sim_uuid=sim_uuid,\n timestamp=now_iso8601(),\n uuid=log_uuid,\n content='log msg',\n level='DEBUG')\n\n datastore = DatastoreRestClient(sim_uuid=sim_uuid)\n\n datastore.store_item(log)\n result = datastore.load_item('log', log_uuid)\n assert result.uuid == str(log_uuid)\n","sub_path":"tests/datastore/test_datastore.py","file_name":"test_datastore.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"262386541","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass IndexTracker(object):\n def __init__(self, ax, X):\n self.ax = ax\n ax.set_title('use scroll wheel to navigate images')\n\n self.X = X\n rows, cols, self.slices = X.shape\n self.ind = self.slices//2\n\n self.im = ax.imshow(self.X[:, :, self.ind])\n self.update()\n\n def onscroll(self, event):\n if event.button == 'up':\n self.ind = (self.ind + 1) % self.slices\n else:\n self.ind = (self.ind - 1) % self.slices\n self.update()\n\n def update(self):\n self.im.set_data(self.X[:, :, self.ind])\n self.ax.set_ylabel('slice %s' % self.ind)\n self.im.axes.figure.canvas.draw()\n\n\ndef plot3d(X):\n \"\"\"\n X is a height x width x depth array.\n \"\"\"\n fig, ax = plt.subplots(1, 1)\n tracker = IndexTracker(ax, X)\n fig.canvas.mpl_connect('scroll_event', tracker.onscroll)\n plt.show()\n\n\ndef plot2d(X):\n img = plt.imshow(X, interpolation='nearest')\n plt.axis('off')\n plt.show()\n","sub_path":"plot_utils.py","file_name":"plot_utils.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"352956110","text":"# -*- coding: utf-8 -*-\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Callable\n\nfrom .validator import Validator\n\nif TYPE_CHECKING:\n from pygerber.drawing_state import DrawingState\n from pygerber.tokens.token import Token\n\n\nclass Float(Validator):\n def __call__(self, token: Token, state: DrawingState, value: str) -> float:\n if value is not None:\n return float(value)\n else:\n return self.default\n\n\nclass Int(Validator):\n def __call__(self, token: Token, state: DrawingState, value: str) -> int:\n if value is not None:\n return int(value)\n else:\n return self.default\n\n\nclass String(Validator):\n def __call__(self, token: Token, state: DrawingState, value: str) -> str:\n if value is not None:\n return str(value)\n else:\n return self.default\n\n\nclass Function(Validator):\n def __init__(self, function: Callable) -> None:\n self.function = function\n super().__init__(default=None)\n\n def __call__(self, token: Token, state: DrawingState, value: str) -> str:\n return self.function(token, value)\n","sub_path":"src/pygerber/validators/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"33538461","text":"from Tkinter import *\nfrom PIL import Image, ImageTk\nimport math\nimport time\n\n\nclass App:\n\n def __init__(self, master):\n\n #initialize physics settings\n self.now = lambda: int(time.time() * 1000)\n self.lastTime = self.now()\n self.ROD_LENGTH_METERS = 2.25\n self.GRAVITY = 
-10\n\n #initialize display settings\n self.WINDOW_WIDTH, self.WINDOW_HEIGHT = 600, 600\n self.METER_SIZE_PIXELS = 100\n self.ROD_LENGTH_PIXELS = self.ROD_LENGTH_METERS * self.METER_SIZE_PIXELS\n\n #initialize the window\n self.master = master\n self.panel = Canvas( master,\n width=self.WINDOW_WIDTH,\n height=self.WINDOW_HEIGHT,\n )\n self.panel.pack()\n\n #position the rod\n self.theta = 0.7853 #45 degrees, in radians\n self.omega = 0.0 #radians/second\n\n \n\n img = Image.open('swirl.jpg')\n self.bg = ImageTk.PhotoImage(img)\n self.panel.create_image((self.WINDOW_WIDTH/2, self.WINDOW_HEIGHT/2),\n image=self.bg\n )\n #draw the rod\n self.line = self.panel.create_line(\n self.WINDOW_WIDTH/2,\n self.WINDOW_HEIGHT/2,\n self.WINDOW_WIDTH/2 + math.sin(self.theta)*self.ROD_LENGTH_PIXELS,\n self.WINDOW_HEIGHT/2 + math.cos(self.theta)*self.ROD_LENGTH_PIXELS,\n width=10,\n fill='white'\n ) \n\n\n def draw(self):\n '''\n redraw the rod\n '''\n #dt = (self.now() - self.lastTime)/1000.0\n dt = 0.03\n self.theta += self.omega*dt\n self.omega += 1.5 * self.GRAVITY * math.sin(self.theta) / self.ROD_LENGTH_METERS * dt\n self.panel.coords(\n self.line,\n self.WINDOW_WIDTH/2,\n self.WINDOW_HEIGHT/2,\n self.WINDOW_WIDTH/2 + math.sin(self.theta)*self.ROD_LENGTH_PIXELS,\n self.WINDOW_HEIGHT/2 + math.cos(self.theta)*self.ROD_LENGTH_PIXELS,\n )\n self.panel.update()\n self.lastTime = self.now()\n\n\n\n\nroot = Tk()\nroot.title(\"Simple Pendulum\")\napp = App(root)\nwhile True:\n app.draw()\n root.update_idletasks()\n root.update()\n time.sleep(0.02)\n \nroot.mainloop()\n","sub_path":"SimplePendulum.py","file_name":"SimplePendulum.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"498812519","text":"#!/usr/bin/python\n\nimport common\nfrom bson.objectid import ObjectId\nimport logging\nimport mangadb\nimport beanstalkc\nimport pickle\nfrom datetime import datetime\nimport calendar\nimport time\n\n\ndef migrate_series():\n\tcursor = mangadb.sourceDb.series.find({\"has_migrated\" : {\"$exists\" : False} })\n\tif cursor is None:\n\t\tcommon.logger.info(\"Series cursor was empty. 
Returning\")\n\t\treturn\n\n\tcount = cursor.count()\n\tcommon.logger.info(\"Got %s series to migrate\", count)\n\n\tcounter = 0\n\tfor source_record in cursor:\n\t\tprod_record = {}\n\t\tprod_record['_id'] = source_record.get('_id')\n\t\tprod_record['name'] = source_record.get('name')\n\t\tprod_record['author'] = source_record.get('author')\n\t\tprod_record['artist'] = source_record.get('artist')\n\t\tprod_record['summary'] = source_record.get('summary')\n\t\tprod_record['url_segment'] = source_record.get('url_segment')\n\t\tprod_record['cover_image_url'] = source_record.get('cover_image')\n\t\tprod_record['genres'] = source_record.get('genres')\n\t\tprod_record['time_created'] = calendar.timegm(time.gmtime())\n\n\t\tsource_release_year = source_record.get('year_of_release');\n\t\tif source_release_year is not None and source_release_year.isdigit():\n\t\t\tprod_record['year_of_release'] = int(source_release_year)\n\n\t\tmangadb.prodDb.series.save(prod_record, manipulate=True)\n\t\tmangadb.sourceDb.series.update({\"_id\" : source_record.get('_id')} , {\"$set\" : {\"has_migrated\" : True}})\n\n\t\tcommon.logger.info(\"Series %s / %s\", counter, count)\n\t\tcounter = counter + 1\n\n\ndef migrate_chapters():\n\tcursor = mangadb.sourceDb.chapters.find({\"has_migrated\" : {\"$exists\" : False} })\n\tif cursor is None:\n\t\tcommon.logger.info(\"Chapters cursor was empty. Returning\")\n\t\treturn\n\n\tcount = cursor.count()\n\tcommon.logger.info(\"Got %s series to migrate\", count)\n\n\tcounter = 0\n\tfor source_record in cursor:\n\t\tprod_record = {}\n\t\tprod_record['_id'] = source_record.get('_id')\n\t\tprod_record['series_id'] = source_record.get('series_id')\n\t\tprod_record['name'] = source_record.get('name')\n\t\tprod_record['title'] = source_record.get('title')\n\t\tprod_record['sequence_number'] = source_record.get('sequence_number')\n\t\tprod_record['time_created'] = calendar.timegm(time.gmtime())\n\t\t\n\t\tdate_added_str = prod_record.get('date_added')\n\t\tif date_added_str is not None:\n\t\t\tdt = datetime.strptime(date_added_str, '%m/%d/%Y')\n\t\t\tprod_record['release_date'] = calendar.timegm(dt.utctimetuple())\n\n\t\tmangadb.prodDb.chapters.save(prod_record, manipulate=True)\n\t\tmangadb.sourceDb.chapters.update({\"_id\" : source_record.get('_id')} , {\"$set\" : {\"has_migrated\" : True}})\n\n\t\tcommon.logger.info(\"Chapter %s / %s\", counter, count)\n\t\tcounter = counter + 1\n\ndef migrate_pages():\n\tcursor = mangadb.sourceDb.pages.find({\"has_migrated\" : {\"$exists\" : False} })\n\tif cursor is None:\n\t\tcommon.logger.info(\"Pages cursor was empty. 
Returning\")\n\t\treturn\n\n\tcount = cursor.count()\n\tcommon.logger.info(\"Got %s pages to migrate\", count)\n\n\tcounter = 0\n\tfor source_record in cursor:\n\t\tprod_record = {}\n\t\tprod_record['_id'] = source_record.get('_id')\n\t\tprod_record['series_id'] = source_record.get('series_id')\n\t\tprod_record['chapter_id'] = source_record.get('chapter_id')\n\t\tprod_record['sequence_number'] = source_record.get('name')\n\t\tprod_record['image_url'] = source_record.get('image')\n\t\tprod_record['time_created'] = calendar.timegm(time.gmtime())\n\t\n\t\tmangadb.prodDb.pages.save(prod_record, manipulate=True)\n\t\tmangadb.sourceDb.pages.update({\"_id\" : source_record.get('_id')} , {\"$set\" : {\"has_migrated\" : True}})\n\n\t\tcommon.logger.info(\"Page %s / %s\", counter, count)\n\t\tcounter = counter + 1\n\ndef main():\n\tcommon.logger.info(\"Migrating series...\")\n\tmigrate_series();\n\n\tcommon.logger.info(\"Migrating chapters...\")\n\tmigrate_chapters();\n\n\tcommon.logger.info(\"Migrating pages...\")\n\tmigrate_pages();\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"source/source_to_production_migration.py","file_name":"source_to_production_migration.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"353328970","text":"import sys\nimport json\nimport tweepy\nimport requests\nimport math\nimport time\nfrom elasticsearch import Elasticsearch\nfrom kafka import KafkaProducer\nfrom dateutil import parser\nfrom datetime import datetime\nfrom config import consumer_key, consumer_secret, access_token, access_secret, es_endpoint, google_api\nimport urllib3\nurllib3.disable_warnings()\n\nif sys.version_info[0] == 2:\n from httplib import IncompleteRead\nelse:\n from http.client import IncompleteRead\n\n\nFILTERED_KEYWORDS = ['Trump', 'China', 'Amazon', 'Football', 'War', 'Google', 'Love', 'Facebook', 'Movie', 'Music']\nKAFKA_TOPIC = \"twitterstream\"\nKAFKA_PORT = [\"localhost:9092\"]\n\n\nclass TweetStreamListener(tweepy.StreamListener):\n def __init__(self, es):\n self.es = es\n self.rate = 0\n self.other = 0\n self.producer = KafkaProducer(bootstrap_servers=KAFKA_PORT)\n\n def on_data(self, data):\n try:\n cur_data = json.loads(data)\n location = cur_data['user']['location']\n if location:\n text = cur_data['text']\n keyword = getKeyWord(text)\n api_key = google_api\n coordinates = getCoordinates(api_key, location)\n timestamp = parser.parse(cur_data['created_at'])\n timestamp = timestamp.strftime('%Y-%m-%dT%H:%M:%SZ')\n author = cur_data['user']['screen_name']\n if (keyword and coordinates):\n mapping = {\n 'keyword': keyword,\n 'author': author,\n 'text': text,\n 'timestamp': timestamp,\n 'coordinates': coordinates,\n 'sentiment': None,\n }\n try:\n self.producer.send(topic=KAFKA_TOPIC, value=bytes(json.dumps(mapping)))\n print (\"Kafka Success!\")\n except Exception as e:\n print (e)\n pass\n else:\n print (\"Unstructured data! Pass!\")\n else:\n print (\"No location information! 
Pass!\")\n except Exception as e:\n print (e)\n\n def on_status(self, status):\n print (\"Status: \" + status.text)\n\n def on_error(self, status_code):\n print ('Error:', str(status_code))\n if status_code == 420:\n print (\"Rate Limited!\")\n sleepy = 60 * math.pow(2, self.rate)\n print (time.strftime(\"%Y%m%d_%H%M%S\"))\n print (\"A reconnection attempt will occur in \" + \\\n str(sleepy/60) + \" minutes.\")\n time.sleep(sleepy)\n self.rate += 1\n else:\n sleepy = 5 * math.pow(2, self.other)\n print (time.strftime(\"%Y%m%d_%H%M%S\"))\n print (\"A reconnection attempt will occur in \" + \\\n str(sleepy) + \" seconds.\")\n time.sleep(sleepy)\n self.other += 1\n return True\n\n def on_timeout(self):\n return True\n\n\ndef getCoordinates(api_key, location):\n api_key = api_key\n api_response = requests.get('https://maps.googleapis.com/maps/api/geocode/json?address={0}&key={1}'.format(location, api_key))\n api_response_dict = api_response.json()\n if api_response_dict['status'] == \"OK\":\n latitude = api_response_dict['results'][0]['geometry']['location']['lat']\n longitude = api_response_dict['results'][0]['geometry']['location']['lng']\n coordinates = [longitude, latitude]\n else:\n coordinates = None\n return coordinates\n\n\ndef getKeyWord(text):\n for keyword in FILTERED_KEYWORDS:\n if (keyword in text or keyword.lower() in text or keyword.upper() in text):\n keyword = keyword\n break\n else:\n keyword = None\n return keyword\n\n\ndef main():\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_secret)\n api = tweepy.API(auth)\n es = Elasticsearch(hosts=es_endpoint, port=443, use_ssl=True)\n tweetStreamListener = TweetStreamListener(es)\n \n while True:\n try:\n tweetStream = tweepy.Stream(auth=api.auth, listener=tweetStreamListener)\n tweetStream.filter(track=FILTERED_KEYWORDS)\n except IncompleteRead:\n # reconnect and keep trucking\n tweetStream.disconnect()\n continue\n except KeyboardInterrupt:\n # exit\n tweetStream.disconnect()\n break\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"twitter_streaming_kafka.py","file_name":"twitter_streaming_kafka.py","file_ext":"py","file_size_in_byte":4712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"94523510","text":"#!/usr/bin/env python3\nfrom ev3dev.ev3 import *\nimport time\n\n\ndef saturate(x, left, right):\n if x > right: x = right\n if x < left: x = left\n return x\n\n\nsound = Sound()\nsound.set_volume(100)\nsound.beep()\n\n# первая координата - motorA\n# вторая координата - motorB\n# третья координата - motorC\nq0 = [90, 45, 45]\nq = [5 * q0[0], -5 * q0[1], -5/3 * q0[2]]\n\n# значение коэффициентов в градусных мерах\nk_p = [0.3, 0.3, 0.1]\nk_i = [0.25/60, 0.25/60, 0]\nk_d = [1/60, 1/60, 0]\n\n\nmotorA = LargeMotor('outA')\nmotorB = LargeMotor('outB')\nmotorC = MediumMotor('outC')\n\nmotorA.position = 0\nmotorB.position = 0\nmotorC.position = 0\n\ntimeStart = time.time()\nlast_t = time.time()\nsum = 0\nlast_e = 0\ninaccuracy = 5 # погрешность в градусах\nU_max = 6.97\n\nname = str(q0[0]) + \"_\" + str(q0[1]) + \"_\" + str(q0[2]) + \".txt\"\nfile = open(name, 'w')\n\nmotors_set = [motorA, motorB, motorC]\n\nfor i in range(3):\n while abs(q[i] - motors_set[i].position) > inaccuracy:\n e = q[i] - motors_set[i].position\n dt = time.time() - last_t\n U = k_p[i] * e + k_d[i] * (e - last_e) / dt + k_i[i] * sum * dt\n U = U/U_max*100\n motors_set[i].run_direct(duty_cycle_sp=saturate(U, -100, 100))\n 
file.write(str(motorA.position) + '\\t' + str(motorB.position) + '\\t' + str(motorC.position) + '\\t' + str(\n saturate(U, -100, 100)) + '\\t' + str(k_p[i] * e) + '\\t' + str(k_d[i] * (e - last_e) / dt) + '\\t' +\n str(k_i[i] * sum * dt) + '\\n')\n sum += e\n last_e = e\n last_t = time.time()\n sum = 0\n last_e = 0\n last_t = time.time()\n motors_set[i].run_direct(duty_cycle_sp=0)\n\n\nfile.close()\n","sub_path":"OUT 18.05/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"199543761","text":"from lib.actions import CloudflareBaseAction\n\n\nclass GetZonesAction(CloudflareBaseAction):\n def run(self, **kwargs):\n \"\"\"\n Get Cloudflare DNS Zones\n\n Args:\n None.\n\n Raises:\n CloudFlareAPIError: On HTTP Error or Invaild JSON.\n\n Returns:\n dict: containing DNS zones\n \"\"\"\n return self.invoke(self.client.zones.get, # pylint: disable=no-member\n **kwargs)\n","sub_path":"actions/get_zones.py","file_name":"get_zones.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"355692706","text":"import unittest\nimport torch\nfrom pytorch_metric_learning.losses import LiftedStructureLoss, GeneralizedLiftedStructureLoss\nfrom pytorch_metric_learning.utils import common_functions as c_f\n\n\nclass TestLiftedStructure(unittest.TestCase):\n def test_lifted_structure_loss(self):\n neg_margin = 0.5\n loss_func = LiftedStructureLoss(neg_margin=neg_margin)\n\n embedding_angles = [0, 20, 40, 60, 80]\n embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings\n labels = torch.LongTensor([0, 0, 1, 1, 2])\n\n loss = loss_func(embeddings, labels)\n loss.backward()\n\n pos_pairs = [(0,1), (1,0), (2,3), (3,2)]\n neg_pairs = [(0,2), (0,3), (0,4), (1,2), (1,3), (1,4), (2,0), (2,1), (2,4), (3,0), (3,1), (3,4), (4,0), (4,1), (4,2), (4,3)]\n\n total_loss = 0\n for a1,p in pos_pairs:\n anchor, positive = embeddings[a1], embeddings[p]\n pos_pair_component = torch.sqrt(torch.sum((anchor-positive)**2))\n neg_pair_component = 0\n for a2,n in neg_pairs:\n negative = embeddings[n]\n if a2 == a1:\n neg_pair_component += torch.exp(neg_margin - torch.sqrt(torch.sum((anchor-negative)**2)))\n elif a2 == p:\n neg_pair_component += torch.exp(neg_margin - torch.sqrt(torch.sum((positive-negative)**2)))\n else:\n continue\n total_loss += torch.relu(torch.log(neg_pair_component) + pos_pair_component)**2\n \n total_loss /= 2*len(pos_pairs)\n\n self.assertTrue(torch.isclose(loss, total_loss))\n\n\n def test_with_no_valid_pairs(self):\n loss_func = LiftedStructureLoss(neg_margin=0.5)\n embedding_angles = [0]\n embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings\n labels = torch.LongTensor([0])\n loss = loss_func(embeddings, labels)\n loss.backward()\n self.assertEqual(loss, 0)\n\n\n\nclass TestGeneralizedLiftedStructureLoss(unittest.TestCase):\n def test_generalized_lifted_structure_loss(self):\n neg_margin = 0.5\n loss_func = GeneralizedLiftedStructureLoss(neg_margin=neg_margin)\n\n embedding_angles = [0, 20, 40, 60, 80]\n embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings\n labels = torch.LongTensor([0, 0, 1, 1, 2])\n\n loss = loss_func(embeddings, labels)\n loss.backward()\n\n pos_pairs = 
[(0,1), (1,0), (2,3), (3,2)]\n neg_pairs = [(0,2), (0,3), (0,4), (1,2), (1,3), (1,4), (2,0), (2,1), (2,4), (3,0), (3,1), (3,4), (4,0), (4,1), (4,2), (4,3)]\n\n correct_total = 0\n for i in range(len(embeddings)):\n correct_pos_loss = 0\n correct_neg_loss = 0\n for a,p in pos_pairs:\n if a == i:\n anchor, positive = embeddings[a], embeddings[p]\n correct_pos_loss += torch.exp(torch.sqrt(torch.sum((anchor-positive)**2)))\n if correct_pos_loss > 0:\n correct_pos_loss = torch.log(correct_pos_loss)\n\n for a,n in neg_pairs:\n if a == i:\n anchor, negative = embeddings[a], embeddings[n]\n correct_neg_loss += torch.exp(neg_margin - torch.sqrt(torch.sum((anchor-negative)**2)))\n if correct_neg_loss > 0:\n correct_neg_loss = torch.log(correct_neg_loss)\n\n correct_total += torch.relu(correct_pos_loss + correct_neg_loss)\n\n correct_total /= embeddings.size(0)\n\n self.assertTrue(torch.isclose(loss, correct_total))\n\n\n def test_with_no_valid_pairs(self):\n loss_func = GeneralizedLiftedStructureLoss(neg_margin=0.5)\n embedding_angles = [0]\n embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings\n labels = torch.LongTensor([0])\n loss = loss_func(embeddings, labels)\n loss.backward()\n self.assertEqual(loss, 0)","sub_path":"tests/losses/test_lifted_structure_loss.py","file_name":"test_lifted_structure_loss.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"443847867","text":"#Tic-Tac-Toe Game\n#Complete game functions\n\nimport random, sys, time, math, pygame\nfrom pygame.locals import *\nimport numpy as np\nimport copy\n\n#define window\n# NOTE (editor): the constants below were referenced in this file but never\n# defined in the snippet; the values here are assumed defaults so the module\n# can run. GAMEBOARD_SIZE = 3 and WIN_MARK = 3 match the rule text '3 in a row'.\nWINDOW_WIDTH = 600\nWINDOW_HEIGHT = 600\nMARGIN = 20\nTOP_MARGIN = 160\nGRID_SIZE = 400\nGAMEBOARD_SIZE = 3\nWIN_MARK = 3\n\n##set colors\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\n\nclass GameState:\n def __init__(self):\n global FPS_CLOCK, DISPLAYSURF, BASIC_FONT, TITLE_FONT, GAMEOVER_FONT\n\n pygame.init()\n FPS_CLOCK = pygame.time.Clock()\n\n DISPLAYSURF = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\n\n pygame.display.set_caption('TicTacToe')\n\n BASIC_FONT = pygame.font.Font('freesansbold.ttf', 16)\n TITLE_FONT = pygame.font.Font('freesansbold.ttf', 24)\n GAMEOVER_FONT = pygame.font.Font('freesansbold.ttf', 48)\n\n # Set initial parameters\n self.init = False\n self.num_mark = 0\n\n # No stone: 0, Black stone: 1, White stone = -1\n self.gameboard = np.zeros([GAMEBOARD_SIZE, GAMEBOARD_SIZE])\n\n self.x_win = 0\n self.o_win = 0\n self.count_draw = 0\n\n # black turn: 0, white turn: 1\n self.turn = 0\n\n # black wins: 1, white wins: 2, draw: 3, playing: 0\n self.win_index = 0\n\n # List of X coordinates and Y coordinates\n self.X_coord = []\n self.Y_coord = []\n\n for i in range(GAMEBOARD_SIZE):\n self.X_coord.append(\n MARGIN + i * int(GRID_SIZE / (GAMEBOARD_SIZE)) + int(\n GRID_SIZE / (GAMEBOARD_SIZE * 2)))\n self.Y_coord.append(\n TOP_MARGIN + i * int(GRID_SIZE / (GAMEBOARD_SIZE)) + int(\n GRID_SIZE / (GAMEBOARD_SIZE * 2)))\n\n def terminate(self):\n pygame.quit()\n sys.exit()\n\n def rule_msg(self):\n ruleSurf1 = BASIC_FONT.render('Win: O or X mark has to be 3 in a row',\n True, WHITE)\n ruleRect1 = ruleSurf1.get_rect()\n ruleRect1.topleft = (MARGIN, 50)\n DISPLAYSURF.blit(ruleSurf1, ruleRect1)\n\n ruleSurf2 = BASIC_FONT.render('(horizontal, vertical, diagonal)', True,\n WHITE)\n ruleRect2 = ruleSurf1.get_rect()\n ruleRect2.topleft = (MARGIN, 70)\n DISPLAYSURF.blit(ruleSurf2, ruleRect2)\n\n def score_msg(self):\n scoreSurf1 = BASIC_FONT.render('Score: ', True, WHITE)\n scoreRect1 = scoreSurf1.get_rect()\n scoreRect1.topleft = (MARGIN, 
105)\n DISPLAYSURF.blit(scoreSurf1, scoreRect1)\n\n scoreSurf2 = BASIC_FONT.render('O = ' + str(self.o_win) + ' vs ',\n True, WHITE)\n scoreRect2 = scoreSurf2.get_rect()\n scoreRect2.topleft = (scoreRect1.midright[0], 105)\n DISPLAYSURF.blit(scoreSurf2, scoreRect2)\n\n scoreSurf3 = BASIC_FONT.render('X = ' + str(self.x_win) + ' vs ',\n True, WHITE)\n scoreRect3 = scoreSurf3.get_rect()\n scoreRect3.topleft = (scoreRect2.midright[0], 105)\n DISPLAYSURF.blit(scoreSurf3, scoreRect3)\n\n scoreSurf4 = BASIC_FONT.render('Draw = ' + str(self.count_draw), True,\n WHITE)\n scoreRect4 = scoreSurf4.get_rect()\n scoreRect4.topleft = (scoreRect3.midright[0], 105)\n DISPLAYSURF.blit(scoreSurf4, scoreRect4)\n\n def turn_msg(self):\n if self.turn == 0:\n turnSurf = BASIC_FONT.render(\"O's Turn!\", True, WHITE)\n turnRect = turnSurf.get_rect()\n turnRect.topleft = (MARGIN, 135)\n DISPLAYSURF.blit(turnSurf, turnRect)\n else:\n turnSurf = BASIC_FONT.render(\"X's Turn!\", True, WHITE)\n turnRect = turnSurf.get_rect()\n turnRect.topleft = (WINDOW_WIDTH - 75, 135)\n DISPLAYSURF.blit(turnSurf, turnRect)\n\n\n def check_win(self):\n # Check WIN_MARK marks in a row (Horizontal)\n for row in range(GAMEBOARD_SIZE):\n for col in range(GAMEBOARD_SIZE - WIN_MARK + 1):\n # Black win!\n if np.sum(self.gameboard[row, col:col + WIN_MARK]) == WIN_MARK:\n return 1\n # White win!\n if np.sum(self.gameboard[row, col:col + WIN_MARK]) == -WIN_MARK:\n return 2\n\n # Check WIN_MARK marks in a column (Vertical)\n for row in range(GAMEBOARD_SIZE - WIN_MARK + 1):\n for col in range(GAMEBOARD_SIZE):\n # Black win!\n if np.sum(self.gameboard[row: row + WIN_MARK, col]) == WIN_MARK:\n return 1\n # White win!\n if np.sum(\n self.gameboard[row: row + WIN_MARK, col]) == -WIN_MARK:\n return 2\n\n # Check WIN_MARK marks in a diagonal (Diagonal)\n for row in range(GAMEBOARD_SIZE - WIN_MARK + 1):\n for col in range(GAMEBOARD_SIZE - WIN_MARK + 1):\n count_sum = 0\n for i in range(WIN_MARK):\n if self.gameboard[row + i, col + i] == 1:\n count_sum += 1\n if self.gameboard[row + i, col + i] == -1:\n count_sum -= 1\n\n # Black Win!\n if count_sum == WIN_MARK:\n return 1\n\n # White WIN!\n if count_sum == -WIN_MARK:\n return 2\n\n for row in range(WIN_MARK - 1, GAMEBOARD_SIZE):\n for col in range(GAMEBOARD_SIZE - WIN_MARK + 1):\n count_sum = 0\n for i in range(WIN_MARK):\n if self.gameboard[row - i, col + i] == 1:\n count_sum += 1\n if self.gameboard[row - i, col + i] == -1:\n count_sum -= 1\n\n # Black Win!\n if count_sum == WIN_MARK:\n return 1\n\n # White WIN!\n if count_sum == -WIN_MARK:\n return 2\n\n # Draw (board is full)\n if self.num_mark == GAMEBOARD_SIZE * GAMEBOARD_SIZE:\n return 3\n\n return 0\n\n\nif __name__ == \"__main__\":\n pass","sub_path":"tic_tac_toe_module.py","file_name":"tic_tac_toe_module.py","file_ext":"py","file_size_in_byte":6053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"267727484","text":"from ResMaker.Package.ActionCount import *\r\nfrom ResMaker.Package.Actions import *\r\nimport copy\r\nfrom math import ceil\r\n\r\n\r\nclass HardwareComponent(action_counts):\r\n def __init__(self, name, actions):\r\n super().__init__(name)\r\n self.add_actions(actions)\r\n\r\n def add_action(self, args):\r\n super().add_action(arguments=args[0], name=args[1])\r\n\r\n def add_actions(self, actions):\r\n for action in copy.deepcopy(actions):\r\n self.add_action(action)\r\n\r\n\r\nclass SRAM(HardwareComponent):\r\n def __init__(self, name, size, bandwidth, n_banks, n_ports, actions):\r\n 
self.bandwidth = bandwidth\r\n self.n_banks = n_banks\r\n self.n_ports = n_ports\r\n self.size = size\r\n super().__init__(name, actions)\r\n\r\n\r\nclass RegFile(HardwareComponent):\r\n def __init__(self, name, size, bandwidth, n_banks, n_ports, actions):\r\n self.bandwidth = bandwidth\r\n self.n_banks = n_banks\r\n self.n_ports = n_ports\r\n self.size = size\r\n super().__init__(name, actions)\r\n\r\n\r\nclass Adders(HardwareComponent):\r\n def __init__(self, name, bandwidth, actions):\r\n self.bandwidth = bandwidth\r\n super().__init__(name, actions)\r\n\r\n\r\nclass Bitwise(HardwareComponent):\r\n def __init__(self, name, bandwidth, actions):\r\n self.bandwidth = bandwidth\r\n super().__init__(name, actions)\r\n\r\n\r\nclass PE:\r\n\r\n def __init__(self, name, components_names):\r\n self.sub_components_names = components_names\r\n self.sub_components = []\r\n self.name = name\r\n\r\n ##_____________ PE Components ___________##\r\n #pred spad\r\n self.prediction_buffer = SRAM(name=name+self.sub_components_names[len(self.sub_components)], size=0, bandwidth=8, n_banks=4, n_ports=2, actions=prediction_buffer_actions)\r\n self.sub_components.append(self.prediction_buffer)\r\n\r\n #fusion unit\r\n self.adders = Adders(name=name+self.sub_components_names[len(self.sub_components)], bandwidth=64, actions=adders_actions)\r\n self.sub_components.append(self.adders)\r\n\r\n self.bit_masks_and = Bitwise(name=name+self.sub_components_names[len(self.sub_components)], bandwidth=8, actions=and_gate_actions)\r\n self.sub_components.append(self.bit_masks_and)\r\n\r\n self.mux_ = Bitwise(name=name+self.sub_components_names[len(self.sub_components)], bandwidth=8, actions=and_gate_actions)\r\n self.sub_components.append(self.mux_)\r\n\r\n self.mux = Bitwise(name=name+self.sub_components_names[len(self.sub_components)], bandwidth=16, actions=and_gate_actions)\r\n self.sub_components.append(self.mux)\r\n\r\n self.components_dict = {}\r\n for component in self.sub_components_names:\r\n self.components_dict[component] = self.sub_components[self.sub_components_names.index(component)]\r\n\r\n def get_dict(self):\r\n dicts = []\r\n for i in range(len(self.sub_components)):\r\n component = self.sub_components[i]\r\n dicts.append(component.get_dict())\r\n return dicts\r\n\r\n def inc_action_count(self, sub_component, action, count):\r\n component = self.components_dict.get(sub_component)\r\n component.inc_action(action, count)\r\n\r\n\r\n\r\n def process_layer(self, layer, cycles, total_PE, PE_adders):\r\n\r\n prediction_buffer_writes = ceil(layer['midd_ch'] / self.prediction_buffer.bandwidth) * ceil((layer['pixels']**2)/total_PE) * 9\r\n prediction_buffer_reads = (PE_adders) * cycles\r\n\r\n self.prediction_buffer.inc_action(action_write, prediction_buffer_writes)\r\n self.prediction_buffer.inc_action(action_read, prediction_buffer_reads)\r\n\r\n total_adds = ceil(PE_adders * cycles)\r\n self.adders.inc_action(action_Add, total_adds)\r\n\r\n total_bit_masks = ceil(PE_adders * cycles)\r\n self.bit_masks_and.inc_action(1, total_bit_masks)\r\n\r\n total_mux_ = ceil(PE_adders * cycles)\r\n self.mux_.inc_action(1, total_mux_)\r\n\r\n\r\n total_mux = ceil(PE_adders * 2 * cycles)\r\n self.mux.inc_action(1, total_mux)\r\n\r\n\r\nclass Chip:\r\n\r\n def __init__(self, name, total_PE, PE_components):\r\n self.name = name\r\n self.total_PE = total_PE\r\n self.prediction_glb = SRAM(name=self.name+'pred_glb', size=0, bandwidth=64, n_banks=2, n_ports=1, actions=prediction_glb_actions)\r\n self.global_AND_gate = 
Bitwise(name=self.name+'binary_decoder', bandwidth=64, actions=and_gate_actions)\r\n self.PEs = []\r\n for PE_number in range(self.total_PE):\r\n self.PEs.append(PE(name=self.name+'PE['+str(PE_number) +']', components_names=PE_components))\r\n\r\n def process(self, layer, cycles, outer_M, PE_adders):\r\n prediction_glb_writes = ceil( (layer['midd_ch'] * ((layer['pixels'])*(layer['pixels'])) )/self.prediction_glb.bandwidth)*2\r\n prediction_glb_reads = prediction_glb_writes\r\n self.prediction_glb.inc_action(action_write, prediction_glb_writes)\r\n self.prediction_glb.inc_action(action_read, prediction_glb_reads)\r\n\r\n total_ands = ceil(cycles * 24 * PE_adders)\r\n self.global_AND_gate.inc_action(action_bitwise, total_ands)\r\n\r\n for PE in self.PEs:\r\n PE.process_layer(layer, cycles, self.total_PE, PE_adders)\r\n","sub_path":"mobilenet/cifar10/bottlenecks/ResMaker/Package/HardwareComponents.py","file_name":"HardwareComponents.py","file_ext":"py","file_size_in_byte":5262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"583883041","text":"\"\"\"\n@Time: 2021/3/2\n@Author:chenzhe\n\n\"\"\"\nfrom datetime import datetime\nfrom time import sleep\n\nimport yaml\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.remote.webdriver import WebDriver\n\nfrom webAutomation.seleniumPO.vkycui.common.log import Log\nfrom webAutomation.seleniumPO.vkycui.config.config import driver_path\n\nlogger = Log()\n\n\ndef my_print(msg):\n logger.info(msg)\n\n\nclass BasePage:\n _element_content = \"\"\n _error_count = 0\n _error_max = 10\n _params = {}\n\n def __init__(self, browser='ff', remote_address=None, driver: WebDriver = None):\n t1 = datetime.now()\n dc = {'platform': 'ANY', 'browserName': 'chrome', 'version': '', 'javascriptEnabled': True}\n if driver == None:\n if remote_address is None:\n if browser == \"firefox\" or browser == \"ff\":\n self.driver = webdriver.Firefox()\n elif browser == \"headless chrome\" or browser == \"headless Chrome\" or browser == \"headless_chrome\" or browser == \"headless_Chrome\":\n options = Options()\n options.add_argument('--headless')\n options.add_argument('--disable-gpu')\n self.driver = webdriver.Chrome(executable_path=driver_path, options=options)\n elif browser == \"chrome\" or browser == \"Chrome\":\n options = Options()\n # 用于浏览器复用\n # options.debugger_address = \"127.0.0.1:9999\"\n options.add_argument(\"--disable-infobars\")\n options.add_argument(\"start-maximized\")\n options.add_argument(\"--disable-extensions\")\n # 1-allow;2-disable 强制打开Chrome浏览器的mic和camera权限,不需要单独点击允许或拒绝\n options.add_experimental_option(\"prefs\", {\n \"profile.default_content_setting_values.media_stream_mic\": 1,\n \"profile.default_content_setting_values.media_stream_camera\": 1,\n \"profile.default_content_setting_values.geolocation\": 1,\n \"profile.default_content_setting_values.notifications\": 1\n })\n\n self.driver = webdriver.Chrome(executable_path=driver_path, options=options)\n elif browser == \"internet explorer\" or browser == \"ie\":\n self.driver = webdriver.Ie()\n elif browser == \"opera\":\n self.driver = webdriver.Opera()\n elif browser == \"phantomjs\":\n self.driver = webdriver.PhantomJS()\n elif browser == \"edge\":\n self.driver = webdriver.Edge()\n\n else:\n if browser == \"RChrome\":\n self.driver = webdriver.Remote(command_executor='http://' + remote_address + '/wd/hub',\n desired_capabilities=dc)\n elif browser == \"RIE\":\n dc['browserName'] = 
'internet explorer'\n self.driver = webdriver.Remote(command_executor='http://' + remote_address + '/wd/hub',\n desired_capabilities=dc)\n elif browser == \"RFirefox\":\n dc['browserName'] = 'firefox'\n dc['marionette'] = False\n self.driver = webdriver.Remote(command_executor='http://' + remote_address + '/wd/hub',\n desired_capabilities=dc)\n try:\n self.driver.implicitly_wait(5)\n my_print(\n \"Success Start a new browser:{0} , Spend {1} seconds\".format(browser,\n (datetime.now() - t1).seconds))\n except Exception:\n raise NameError(\"Not found {0} browser,You can enter 'ie','ff',\"\n \"'chrome','RChrome','RIe' or 'RFirefox'.\".format(browser))\n\n else:\n self.driver = driver\n\n def quit_driver(self):\n self.driver.quit()\n\n def find(self, by, locator=None):\n # 查找并返回这个元素,10次还找不到就抛异常\n try:\n element = self.driver.find_elements(*by) if isinstance(by, tuple) else self.driver.find_element(by, locator)\n self._error_count = 0\n return element\n\n except Exception as e:\n self._error_count += 1\n if self._error_count >= self._error_max:\n raise e\n\n def steps(self, path, keyname):\n global element\n with open(path, encoding=\"utf-8\") as file:\n steps: list[dict] = yaml.safe_load(file)\n # print(steps)\n for step in steps:\n try:\n if step['elementname'] != keyname:\n continue\n else:\n if \"by\" in step.keys():\n element = self.find(step['by'], step['locator'])\n sleep(1)\n if \"action\" in step.keys():\n if \"click\" == step[\"action\"]:\n element.click()\n if \"send\" == step[\"action\"]:\n content: str = step[\"value\"]\n print(step['value'])\n for param in self._params:\n content = content.replace(\"{%s}\" % param, self._params[param])\n print(content)\n element.send_keys(content)\n sleep(1)\n if \"textContent\" == step[\"action\"]:\n self.element_content = element.text\n return self.element_content\n\n except Exception as e:\n my_print(\"Can not find this element\")\n","sub_path":"webAutomation/seleniumPO/vkycui/page/pageobject/base_page.py","file_name":"base_page.py","file_ext":"py","file_size_in_byte":6134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"116050592","text":"class Cannons:\r\n def __init__(self,name,can_range,can_damage,can_shots,can_special,cost):\r\n self.name = name\r\n self.can_range = can_range\r\n self.can_damage = can_damage\r\n self.can_shots = can_shots\r\n self.can_special = can_special\r\n self.cost = cost\r\n def card(self):\r\n name = \" \"+self.name+\" \"*(17-len(self.name)+1)\r\n if len(str(self.can_range)) == 2:\r\n cannon_range = \" \"+str(self.can_range)+\" \"\r\n else:\r\n cannon_range = \" \"+str(self.can_range)+\" \"\r\n \r\n if len(str(self.can_damage)) == 2:\r\n cannon_damage = \" \"+str(self.can_damage)+\" \"\r\n else:\r\n cannon_damage = \" \"+str(self.can_damage)+\" \"\r\n if (len(str(self.can_shots))) == 2:\r\n shots = \" \"+str(self.can_shots)+\" \"\r\n else:\r\n shots = \" \"+str(self.can_shots)+\" \"\r\n \r\n print(\"| Name | Range | Dmg | Shots | Special: |\")\r\n print(f\"|{name}|{cannon_range}|{cannon_damage}|{shots}|{self.can_special} |\")\r\n print(\"=\"*75)\r\n def get_cost(self):\r\n return self.cost\r\n def get_name(self):\r\n return self.name\r\n def get_range(self):\r\n return self.can_range\r\n","sub_path":"class_canons.py","file_name":"class_canons.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"93588307","text":"import smc.actions.search as search\nfrom smc.compat 
import min_smc_version\nfrom smc.elements.helpers import domain_helper\nfrom smc.base.model import Element, prepared_request, ResourceNotFound,\\\n SubElement, lookup_class\nfrom smc.api.exceptions import UnsupportedEngineFeature,\\\n UnsupportedInterfaceType, TaskRunFailed, EngineCommandFailed,\\\n SMCConnectionError, CertificateError, CreateElementFailed\nfrom smc.core.node import Node\nfrom smc.core.resource import Snapshot, PendingChanges\nfrom smc.core.interfaces import PhysicalInterface, \\\n VirtualPhysicalInterface, TunnelInterface, Interface\nfrom smc.administration.tasks import task_handler, Task\nfrom smc.elements.other import prepare_blacklist\nfrom smc.elements.network import Alias\nfrom smc.vpn.elements import VPNSite\nfrom smc.core.route import Antispoofing, Routing, Routes\nfrom smc.core.contact_address import ContactResource\nfrom smc.core.properties import EngineProperty\nfrom smc.elements.servers import LogServer\nfrom smc.base.collection import create_collection, sub_collection\n\n\nclass Engine(EngineProperty, Element):\n \"\"\"\n An engine is the top level representation of a firewall, IPS\n or virtualized software.\n\n Engine load can be called directly::\n\n >>> from smc.core.engine import Engine\n >>> engine = Engine('testfw')\n >>> print(engine.href)\n http://1.1.1.1:8082/6.1/elements/single_fw/39550\n\n Or load by calling collections (by firewall type)::\n\n >>> from smc.core.engines import Layer3Firewall\n >>> list(Layer3Firewall.objects.all())\n [Layer3Firewall(name=i-06145fc6c59a04335 (us-east-2a))]\n\n Or generic search for all::\n\n >>> list(Search('engine_clusters').objects.all())\n [Layer3Firewall(name=i-06145fc6c59a04335 (us-east-2a)), FirewallCluster(name=sg_vm),\n Layer3VirtualEngine(name=ve-5), MasterEngine(name=master-eng)]\n\n Instance resources:\n\n :ivar list nodes: :py:class:`smc.core.node.Node` nodes associated with\n this engine\n :ivar permissions: :py:class:`smc.administration.access_rights.AccessControlList`\n :ivar routing: :py:class:`smc.core.route.Routing` routing configuration hierarchy\n :ivar routing_monitoring: :py:class:`smc.core.route.Routes` current route table\n :ivar antispoofing: :py:class:`smc.core.route.Antispoofing` antispoofing interface\n configuration\n :ivar internal_gateway: :py:class:`~InternalGateway` engine\n level VPN settings\n :ivar virtual_resource: :py:class:`smc.core.engine.VirtualResource` for engine,\n only relevant to Master Engine\n :ivar interface: :py:class:`smc.core.interfaces.Interface` interfaces\n for this engine\n :ivar physical_interface: :py:class:`smc.core.interfaces.PhysicalInterface`\n access to physical interface settings\n :ivar tunnel_interface: :py:class:`smc.core.interfaces.TunnelInterface`\n retrieve or create tunnel interfaces\n :ivar snapshots: :py:class:`smc.core.engine.Snapshot` engine level policy\n snapshots\n \"\"\"\n typeof = 'engine_clusters'\n\n def __init__(self, name, **meta):\n super(Engine, self).__init__(name, **meta)\n\n @classmethod\n def _create(cls, name, node_type,\n physical_interfaces,\n nodes=1, log_server_ref=None,\n domain_server_address=None,\n enable_antivirus=False, enable_gti=False,\n sidewinder_proxy_enabled=False,\n default_nat=False, location_ref=None,\n enable_ospf=None, ospf_profile=None):\n \"\"\"\n Create will return the engine configuration as a dict that is a\n representation of the engine. 
The creating class will also add\n engine specific requirements before constructing the request\n and sending to SMC (which will serialize the dict to json).\n\n :param name: name of engine\n :param str node_type: comes from class attribute of engine type\n :param dict physical_interfaces: physical interface list of dict\n :param int nodes: number of nodes for engine\n :param str log_server_ref: href of log server\n :param list domain_server_address: dns addresses\n \"\"\"\n node_list = []\n for nodeid in range(1, nodes + 1): # start at nodeid=1\n node_list.append(Node._create(name, node_type, nodeid))\n\n domain_server_list = []\n if domain_server_address:\n for num, server in enumerate(domain_server_address):\n domain_server_list.append({\n 'rank': num, 'value': server})\n\n # Set log server reference, if not explicitly provided\n if not log_server_ref and node_type is not 'virtual_fw_node':\n for log_server in list(LogServer.objects.limit(1)):\n log_server_ref = log_server.href\n\n base_cfg = {\n 'name': name,\n 'nodes': node_list,\n 'domain_server_address': domain_server_list,\n 'log_server_ref': log_server_ref,\n 'physicalInterfaces': physical_interfaces}\n\n if enable_antivirus:\n antivirus = {\n 'antivirus': {\n 'antivirus_enabled': True,\n 'antivirus_update': 'daily',\n 'virus_log_level': 'stored',\n 'virus_mirror': 'update.nai.com/Products/CommonUpdater'}}\n base_cfg.update(antivirus)\n\n if enable_gti:\n gti = {'gti_settings': {\n 'file_reputation_context': 'gti_cloud_only'}}\n base_cfg.update(gti)\n\n if min_smc_version(6.1):\n if sidewinder_proxy_enabled:\n base_cfg.update(sidewinder_proxy_enabled=True)\n\n if default_nat:\n base_cfg.update(default_nat=True)\n\n if location_ref:\n base_cfg.update(location_ref=location_ref)\n\n if enable_ospf:\n if not ospf_profile: # get default profile\n ospf_profile = search.get_ospf_default_profile()\n ospf = {'dynamic_routing': {\n 'ospfv2': {\n 'enabled': True,\n 'ospfv2_profile_ref': ospf_profile}\n }}\n base_cfg.update(ospf)\n\n return base_cfg\n\n @property\n def type(self):\n if not self.meta:\n self.cache()\n return self.meta.type\n\n @property\n def version(self):\n \"\"\"\n Version of this engine\n \n :rtype: str\n \"\"\"\n return self.attr_by_name('engine_version')\n\n def rename(self, name):\n \"\"\"\n Rename the firewall engine, nodes, and internal gateway (VPN gw)\n\n :return: None\n \"\"\"\n for node in self.nodes:\n node.rename(name)\n try:\n del self.cache\n except AttributeError:\n pass\n self.data['name'] = '{}'.format(name)\n self._name = self.data.get('name')\n self.update()\n self.internal_gateway.rename(name)\n\n @property\n def nodes(self):\n \"\"\"\n Return a list of child nodes of this engine. 
This can be\n used to iterate to obtain access to node level operations\n\n :return: nodes for this engine\n :rtype: list(Node)\n \"\"\"\n return list(sub_collection(self.resource.nodes, Node))\n\n @property\n def permissions(self):\n \"\"\"\n Retrieve the permissions for this engine instance.\n ::\n\n for acl in engine.permissions:\n print(acl, acl.granted_element)\n\n :return: access control list permissions\n :rtype: list(AccessControlList)\n \"\"\"\n try:\n acls = self.resource.get('permissions')\n return [Element.from_href(acl)\n for acl in acls['granted_access_control_list']]\n\n except ResourceNotFound:\n raise UnsupportedEngineFeature(\n 'Engine permissions are only supported when using SMC API '\n 'version 6.1 and newer.')\n\n @property\n def pending_changes(self):\n \"\"\"\n Pending changes provides insight into changes on an engine that are\n pending approval or disapproval. Feature requires SMC >= v6.2.\n\n :raises UnsupportedEngineFeature: if SMC is not\n version >= 6.2 or the engine type doesn't support pending changes\n :return: :py:class:`smc.core.resources.PendingChanges`\n \"\"\"\n try:\n if self.resource.pending_changes:\n return PendingChanges(self.resource)\n except ResourceNotFound:\n raise UnsupportedEngineFeature(\n 'Pending changes is an unsupported feature '\n 'on this engine: {}'.format(self.type))\n\n def alias_resolving(self):\n \"\"\"\n Alias definitions with resolved values as defined on this engine.\n Aliases can be used in rules to simplify multiple object creation\n ::\n\n print(list(engine.alias_resolving()))\n\n :return: generator :py:class:`smc.elements.network.Alias`\n \"\"\"\n for alias in self.resource.get('alias_resolving'):\n yield Alias.load(alias)\n\n def blacklist(self, src, dst, duration=3600):\n \"\"\"\n Add blacklist entry to engine node by name. For blacklist to work,\n you must also create a rule with action \"Apply Blacklist\".\n\n :param str src: source to blacklist, can be /32 or network cidr\n :param str dst: dest to deny to, 0.0.0.0/32 indicates all destinations\n :param int duration: how long to blacklist in seconds\n :raises EngineCommandFailed: blacklist failed during apply\n :return: None\n \"\"\"\n prepared_request(\n EngineCommandFailed,\n href=self.resource.blacklist,\n json=prepare_blacklist(src, dst, duration)\n ).create()\n\n def blacklist_flush(self):\n \"\"\"\n Flush entire blacklist for engine\n\n :raises EngineCommandFailed: flushing blacklist failed with reason\n :return: None\n \"\"\"\n prepared_request(\n EngineCommandFailed,\n href=self.resource.flush_blacklist\n ).delete()\n\n def add_route(self, gateway, network):\n \"\"\"\n Add a route to engine. Specify gateway and network.\n If this is the default gateway, use a network address of\n 0.0.0.0/0.\n\n .. 
note: This will fail if the gateway provided does not have a\n corresponding interface on the network.\n\n :param str gateway: gateway of an existing interface\n :param str network: network address in cidr format\n :raises EngineCommandFailed: invalid route, possibly no network\n :return: None\n \"\"\"\n prepared_request(\n EngineCommandFailed,\n href=self.resource.add_route,\n params={'gateway': gateway,\n 'network': network}\n ).create()\n\n @property\n def routing(self):\n \"\"\"\n Find all routing nodes within engine::\n\n for routing in engine.routing.all():\n for routes in routing:\n ...\n\n Or just retrieve a routing configuration for a single\n interface::\n\n interface = engine.routing.get(0)\n\n :return: :py:class:`smc.core.route.Routing` element\n \"\"\"\n href = self.resource.routing\n return Routing(href=href,\n data=self.resource.get(href))\n\n @property\n def routing_monitoring(self):\n \"\"\"\n Return route table for the engine, including\n gateway, networks and type of route (dynamic, static).\n Calling this can take a few seconds to retrieve routes\n from the engine.\n\n Find all routes for engine resource::\n\n engine = Engine('myengine')\n for route in engine.routing_monitoring.all():\n print route\n\n :raises EngineCommandFailed: routes cannot be retrieved\n :return: list :py:class:`smc.core.route.Routes`\n \"\"\"\n try:\n result = prepared_request(\n EngineCommandFailed,\n href=self.resource.routing_monitoring\n ).read()\n return Routes(result.json)\n except SMCConnectionError:\n raise EngineCommandFailed('Timed out waiting for routes')\n\n @property\n def antispoofing(self):\n \"\"\"\n Antispoofing interface information. By default is based on routing\n but can be modified.\n ::\n\n for entry in engine.antispoofing.all():\n print(entry)\n\n :return: :py:class:`smc.core.route.Antispoofing`\n \"\"\"\n href = self.resource.antispoofing\n return Antispoofing(href=href,\n data=self.resource.get(href))\n\n @property\n def internal_gateway(self):\n \"\"\"\n Engine level VPN gateway information. 
This is a link from\n the engine to VPN level settings like VPN Client, Enabling/disabling\n an interface, adding VPN sites, etc.\n\n :raises UnsupportedEngineFeature: engine type does not have an internal\n gateway\n :return: :py:class:`~InternalGateway`\n \"\"\"\n try:\n result = self.resource.get('internal_gateway')\n if result:\n return InternalGateway(**result.pop())\n\n except ResourceNotFound:\n raise UnsupportedEngineFeature(\n 'This engine does not support an internal gateway for VPN, '\n 'engine type: {}'.format(self.type))\n\n @property\n def virtual_resource(self):\n \"\"\"\n Available on a Master Engine only.\n\n To get all virtual resources call::\n\n engine.virtual_resource.all()\n\n :raises UnsupportedInterfaceType: master engine only\n :return: collection of `.VirtualResource`\n :rtype: SubElementCollection\n \"\"\"\n try:\n return create_collection(\n self.resource.virtual_resources,\n VirtualResource)\n\n except ResourceNotFound:\n raise UnsupportedEngineFeature(\n 'This engine does not support virtual resources; engine '\n 'type: {}'.format(self.type))\n\n @property\n def contact_addresses(self):\n \"\"\"\n All available interfaces that can have contact adresses assigned.\n Only supported with SMC >= 6.2.\n ::\n\n interface1 = engine.contact_addresses(1) # For interface 1\n for ipv4 in interface1:\n if ipv4.address == '2.2.2.2':\n contact = ContactAddress.create('10.10.10.10', location='Default')\n ipv4.add_contact_address(contact)\n\n print(list(engine.contact_addresses)) # list all\n\n for interfaces in engine.contact_addresses.all(): #iterate all\n print(interfaces) #ContactInterface\n\n .. seealso:: :py:class:`smc.core.contact_address.ContactAddress`\n\n :return: list :py:class:`smc.core.contact_address.ContactInterface`\n \"\"\"\n return ContactResource(\n self.resource.get(self.resource.contact_addresses))\n\n @property\n def interface(self):\n \"\"\"\n Get all interfaces, including non-physical interfaces such\n as tunnel or capture interfaces. These are returned as Interface\n objects and can be used to load specific interfaces to modify, etc.\n ::\n\n for interfaces in engine.interface.all():\n ......\n\n :return: :py:class:`smc.core.interfaces.Interface`\n\n See :py:class:`smc.core.interfaces.Interface` for more info\n \"\"\"\n return Interface(\n parent=self.resource.interfaces,\n engine=self)\n\n @property\n def physical_interface(self):\n \"\"\"\n Returns a PhysicalInterface. This property can be used to\n add physical interfaces to the engine. For example::\n\n engine.physical_interface.add_single_node_interface(....)\n engine.physical_interface.add_node_interface(....)\n\n :raises UnsupportedInterfaceType: engine doesn't support this type\n :return: :py:class:`smc.core.interfaces.PhysicalInterface`\n \"\"\"\n try:\n return PhysicalInterface(\n parent=self.resource.physical_interface,\n engine=self)\n except ResourceNotFound:\n raise UnsupportedInterfaceType(\n 'Engine type: {} does not support the physical interface '\n 'type'.format(self.type))\n\n @property\n def virtual_physical_interface(self):\n \"\"\" Master Engine virtual instance only\n\n A virtual physical interface is for a master engine virtual instance.\n This interface type is just a subset of a normal physical interface\n but for virtual engines. 
This interface only sets Auth_Request and\n Outgoing on the interface.\n\n To view all interfaces for a virtual engine::\n\n for intf in engine.virtual_physical_interface.all():\n print(intf)\n\n :raises UnsupportedInterfaceType: virtual engines only\n :return: :py:class:`smc.core.interfaces.VirtualPhysicalInterface`\n \"\"\"\n try:\n return VirtualPhysicalInterface(\n parent=self.resource.virtual_physical_interface,\n engine=self)\n except ResourceNotFound:\n raise UnsupportedInterfaceType(\n 'Only virtual engines support the virtual physical '\n 'interface type. Engine type is: {}'.format(self.type))\n\n @property\n def tunnel_interface(self):\n \"\"\"\n Get only tunnel interfaces for this engine node.\n\n :raises UnsupportedInterfaceType: layer 3 engine's only\n :return: :py:class:`smc.core.interfaces.TunnelInterface`\n \"\"\"\n try:\n return TunnelInterface(\n parent=self.resource.tunnel_interface,\n engine=self)\n except ResourceNotFound:\n raise UnsupportedInterfaceType(\n 'Tunnel interfaces are only supported on layer 3 single '\n 'engines or clusters; Engine type is: {}'.format(self.type))\n\n @property\n def modem_interface(self):\n \"\"\"\n Get only modem interfaces for this engine node.\n\n :return: list of dict entries with href,name,type, or None\n \"\"\"\n try:\n return self.resource.get('modem_interface')\n except ResourceNotFound:\n raise UnsupportedInterfaceType(\n 'Modem interfaces are not supported on this engine type: {}'\n .format(self.type))\n\n @property\n def adsl_interface(self):\n \"\"\"\n Get only adsl interfaces for this engine node.\n\n :return: list of dict entries with href,name,type, or None\n \"\"\"\n try:\n return self.resource.get('adsl_interface')\n except ResourceNotFound:\n raise UnsupportedInterfaceType(\n 'ADSL interfaces are not supported on this engine type: {}'\n .format(self.type))\n\n @property\n def wireless_interface(self):\n \"\"\"\n Get only wireless interfaces for this engine node.\n\n :return: list of dict entries with href,name,type, or None\n \"\"\"\n try:\n return self.resource.get('wireless_interface')\n except ResourceNotFound:\n raise UnsupportedInterfaceType(\n 'Wireless interfaces are not supported on this engine type: '\n '{}'.format(self.type))\n\n @property\n def switch_physical_interface(self):\n \"\"\"\n Get only switch physical interfaces for this engine node.\n\n :return: list of dict entries with href,name,type, or None\n \"\"\"\n try:\n return self.resource.get('switch_physical_interface')\n except ResourceNotFound:\n raise UnsupportedInterfaceType(\n 'Switch interfaces are not supported on this engine type: {}'\n .format(self.type))\n\n def refresh(self, wait_for_finish=True, sleep=3):\n \"\"\"\n Refresh existing policy on specified device. 
This is an asynchronous\n call that will return a 'follower' link that can be queried to\n determine the status of the task.\n\n Last yield is result href; if wait_for_finish=False, the only yield is\n the follower href::\n\n task = engine.refresh()\n for message in task:\n print message\n\n :param bool wait_for_finish: whether to wait in a loop until the upload\n completes\n :param int sleep: number of seconds to sleep if wait_for_finish=True\n :raises TaskRunFailed: refresh failed, possibly locked policy\n :return: generator yielding updates on progress\n \"\"\"\n element = prepared_request(\n TaskRunFailed,\n href=self.resource.refresh\n ).create()\n\n return task_handler(\n Task(**element.json),\n wait_for_finish=wait_for_finish,\n sleep=sleep)\n\n def upload(self, policy=None, wait_for_finish=False, sleep=3):\n \"\"\"\n Upload policy to engine. This is used when a new policy is required\n for an engine, or this is the first time a policy is pushed to an\n engine.\n If an engine already has a policy and the intent is to re-push, then\n use :py:func:`refresh` instead.\n The policy argument can use a wildcard * to specify in the event a full\n name is not known::\n\n engine = Engine('myfw')\n task = engine.upload('Amazon*', wait_for_finish=True)\n for message in task:\n print message\n\n :param str policy: name of policy to upload to engine; if None, current\n policy\n :param bool wait_for_finish: whether to wait for async responses\n :param int sleep: number of seconds to sleep if wait_for_finish=True\n :raises TaskRunFailed: upload failed with reason\n :return: generator yielding updates on progress\n \"\"\"\n element = prepared_request(\n TaskRunFailed,\n href=self.resource.upload,\n params={'filter': policy}\n ).create()\n\n return task_handler(\n Task(**element.json),\n wait_for_finish=wait_for_finish,\n sleep=sleep)\n\n def generate_snapshot(self, filename='snapshot.zip'):\n \"\"\"\n Generate and retrieve a policy snapshot from the engine\n This is blocking as file is downloaded\n\n :param str filename: name of file to save file to, including directory\n path\n :raises EngineCommandFailed: snapshot failed, possibly invalid filename\n specified\n :return: None\n \"\"\"\n try:\n prepared_request(\n EngineCommandFailed,\n href=self.resource.generate_snapshot,\n filename=filename\n ).read()\n except IOError as e:\n raise EngineCommandFailed(\n 'Generate snapshot failed: {}'.format(e))\n\n @property\n def snapshots(self):\n \"\"\"\n References to policy based snapshots for this engine, including\n the date the snapshot was made\n\n :raises EngineCommandFailed: failure downloading, or IOError\n :return: collection of :class:`smc.core.resource.Snapshot`\n :rtype: SubElementCollection\n \"\"\"\n return sub_collection(\n self.resource.snapshots, Snapshot)\n\n def __unicode__(self):\n return u'{0}(name={1})'.format(\n lookup_class(self.type).__name__, self.name)\n\n\nclass InternalGateway(SubElement):\n \"\"\"\n InternalGateway represents the engine side VPN configuration\n This defines settings such as setting VPN sites on protected\n networks and engine level certificates.\n\n Since each engine has only one internal gateway, this resource\n is loaded immediately when called through engine.internal_gateway\n\n List endpoints where VPN can be enabled::\n\n list(engine.internal_gateway.internal_endpoint.all())\n\n \"\"\"\n\n def __init__(self, **meta):\n super(InternalGateway, self).__init__(**meta)\n\n def rename(self, name):\n self.data['name'] = name = '{} Primary'.format(name)\n 
self.update()\n\n @property\n def vpn_site(self):\n \"\"\"\n Retrieve VPN Site information for this internal gateway\n\n Find all configured sites for engine::\n\n for site in engine.internal_gateway.vpn_site.all():\n print site\n\n :return: collection of :class:`smc.vpn.elements.VPNSite`\n :rtype: SubElementCollection\n \"\"\"\n return create_collection(\n self.resource.vpn_site, VPNSite)\n\n @property\n def internal_endpoint(self):\n \"\"\"\n Internal Endpoints define and enable VPN settings on a\n specific interface.\n\n Find all internal endpoints for an engine::\n\n for x in engine.internal_gateway.internal_endpoint.all():\n print x\n\n :return: collection of :class:`.InternalEndpoint`\n :rtype: SubElementCollection\n \"\"\"\n return sub_collection(\n self.resource.internal_endpoint,\n InternalEndpoint)\n\n def gateway_certificate(self):\n \"\"\"\n :return: list\n \"\"\"\n return self.resource.get('gateway_certificate')\n\n def gateway_certificate_request(self):\n \"\"\"\n :return: list\n \"\"\"\n return self.resource.get('gateway_certificate_request')\n\n def generate_certificate(self, certificate_request):\n \"\"\"\n Generate an internal gateway certificate used for VPN on this engine.\n Certificate request should be an instance of VPNCertificate.\n\n :param: VPNCertificate certificate_request: CSR generated to provide\n a valid certificate\n :raises CertificateError: error generating certificate\n :return: None\n \"\"\"\n prepared_request(\n CertificateError,\n href=self.resource.generate_certificate,\n json=vars(certificate_request)\n ).create()\n\n\nclass InternalEndpoint(SubElement):\n \"\"\"\n An Internal Endpoint is an interface mapping that enables VPN on the\n associated interface.\n This also defines what type of VPN to enable such as IPSEC, SSL VPN,\n or SSL VPN Portal. \n\n To see all available internal endpoint (VPN gateways) on a particular\n engine, use an engine reference::\n\n >>> engine = Engine('sg_vm')\n >>> for e in list(engine.internal_gateway.internal_endpoint):\n ... print(e)\n ...\n InternalEndpoint(name=10.0.0.254)\n InternalEndpoint(name=172.18.1.254)\n \n Each property defines an attribute that can be modified. The property name\n maps to the attribute name and return value to the type. 
For example, to \n specify custom endpoint settings::\n \n vpn.modify_attribute(\n enabled=True,\n nat_t=True,\n force_nat_t=True,\n ssl_vpn_portal=False,\n ssl_vpn_tunnel=True,\n ipsec_vpn=True)\n \"\"\"\n\n def __init__(self, **meta):\n super(InternalEndpoint, self).__init__(**meta)\n\n @property\n def enabled(self):\n \"\"\"\n Is this VPN endpoint enabled\n \n :return: True, False\n :rtype: boolean\n \"\"\"\n return self.data.get('enabled')\n\n @property\n def force_nat_t(self):\n \"\"\"\n Is force NAT-T enabled\n \n :return: True, False\n :rtype: boolean\n \"\"\"\n return self.data.get('force_nat_t')\n \n @property\n def nat_t(self):\n \"\"\"\n Is NAT-T enabled\n \n :return: True, False\n :rtype: boolean\n \"\"\"\n return self.data.get('nat_t')\n\n @property\n def ssl_vpn_portal(self):\n \"\"\"\n Whether SSL VPN portal is enabled\n \n :return: True, False\n :rtype: boolean\n \"\"\"\n return self.data.get('ssl_vpn_portal')\n \n @property\n def ssl_vpn_tunnel(self):\n \"\"\"\n Whether SSL VPN Tunnel is enabled\n \n :return: True, False\n :rtype: boolean\n \"\"\"\n return self.data.get('ssl_vpn_tunnel')\n \n @property\n def ipsec_vpn(self):\n \"\"\"\n Whether IPSEC vpn is enabled on this VPN interface\n \n :return: True, False\n :rtype: boolean\n \"\"\"\n return self.data.get('ipsec_vpn')\n \n @property\n def physical_interface(self):\n \"\"\"\n Read-only referenced physical interface for this endpoint.\n \"\"\"\n pass\n\n\nclass VirtualResource(SubElement):\n \"\"\"\n A Virtual Resource is a container placeholder for a virtual engine\n within a Master Engine. When creating a virtual engine, each virtual\n engine must have a unique virtual resource for mapping. The virtual\n resource has an identifier (vfw_id) that specifies the engine ID for\n that instance.\n\n This is called as a resource of an engine. To view all virtual\n resources::\n\n list(engine.virtual_resource.all())\n \"\"\"\n\n def __init__(self, **meta):\n super(VirtualResource, self).__init__(**meta)\n\n def create(self, name, vfw_id, domain='Shared Domain',\n show_master_nic=False, connection_limit=0):\n \"\"\"\n Create a new virtual resource. Called through engine\n reference::\n\n engine.virtual_resource.create(....)\n\n :param str name: name of virtual resource\n :param int vfw_id: virtual fw identifier\n :param str domain: name of domain to install, (default Shared)\n :param bool show_master_nic: whether to show the master engine NIC ID's\n in the virtual instance\n :param int connection_limit: whether to limit number of connections for\n this instance\n :return: href of new virtual resource\n :rtype: str\n \"\"\"\n allocated_domain = domain_helper(domain)\n json = {'name': name,\n 'connection_limit': connection_limit,\n 'show_master_nic': show_master_nic,\n 'vfw_id': vfw_id,\n 'allocated_domain_ref': allocated_domain}\n\n return prepared_request(\n CreateElementFailed,\n href=self.href,\n json=json\n ).create().href\n\n @property\n def allocated_domain_ref(self):\n \"\"\"\n Domain that this virtual engine is allocated in. 'Shared Domain' is\n is the default if no domain is specified.\n ::\n\n >>> for resource in engine.virtual_resource:\n ... resource, resource.allocated_domain_ref\n ... 
\n (VirtualResource(name=ve-1), AdminDomain(name=Shared Domain))\n (VirtualResource(name=ve-8), AdminDomain(name=Shared Domain))\n\n :return: AdminDomain element\n :rtype: AdminDomain\n \"\"\"\n return Element.from_href(self.data.get('allocated_domain_ref'))\n\n @property\n def connection_limit(self):\n \"\"\"\n Maximum connections allowed by this virtual engine\n\n :return: connection limit\n :rtype: int\n \"\"\"\n return self.data.get('connection_limit')\n\n @property\n def vfw_id(self):\n \"\"\"\n Virtual fw identifier. This is unique per virtual engine.\n\n :return: vfw id\n :rtype: int\n \"\"\"\n return self.data.get('vfw_id')\n\n @property\n def show_master_nic(self):\n \"\"\"\n Show the Physical Interface IDs of the Master NGFW Engine in the\n interface properties of the Virtual NGFW Engine.\n\n :return: True, False if engine can see master engine nic order\n :rtype: bool\n \"\"\"\n return self.data.get('show_master_nic')\n","sub_path":"smc/core/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":31942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"78428658","text":"from config import ClientConfig\nfrom config import ReplicaConfig\n\n\ndef parse_config_file(filename):\n config = {}\n with open(filename, 'r') as f:\n for line in f:\n if line[0] == '#':\n continue\n (key, sep, val) = line.partition('=')\n # if the line does not contain '=', it is invalid and hence ignored\n if len(sep) != 0:\n val = val.strip()\n config[key.strip()] = val\n\n return config\n","sub_path":"src/multihost/read_config.py","file_name":"read_config.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"398677398","text":"\nfrom datetime import datetime\nfrom utils.file_io import join, create_join, listdir, isdir, isfile, remove, rm_rf, rename\nfrom utils.file_io import DelayedKeyboardInterrupt, copy_with_prefix_and_rename, basename\nfrom utils.yaml_io import load_yaml, save_yaml\nfrom utils.param_ops import zip_nt_params, dict_print, change_key, unzip_nt_params\nfrom sys import stderr\nfrom itertools import count\nfrom math import isnan\nimport torch\n\n_rt_file = 'register_and_tests.yaml'\n_rt_lock = 'register_and_tests.lock'\n_sv_file = 'settings_and_validation.yaml'\n_sv_lock = 'settings_and_validation.lock'\n\ndef _rt_file_lock(task_dir):\n rt_file = join(task_dir, _rt_file)\n rt_lock = join(task_dir, _rt_lock)\n return rt_file, rt_lock\n\ndef _sv_file_lock(instance_dir):\n sv_file = join(instance_dir, _sv_file)\n sv_lock = join(instance_dir, _sv_lock)\n return sv_file, sv_lock\n\nclass Recorder:\n '''A Recorder provides environment for an Operator, created in a Manager, operated by the Operator.'''\n \n def __init__(self, task_dir, task_module, config_dict_or_instance, instance_name = None, keep_top_k = 4, evalb = None, read_only = False):\n # with DelayedKeyboardInterrupt():\n new_instance = isinstance(config_dict_or_instance, dict)\n if read_only:\n assert not new_instance, 'parallelism of optuna trials should be based on a trained instance'\n self._sv_unlock = None # !!! 
an inactive recorder !!!\n\n rt_file, rt_lock = _rt_file_lock(task_dir)\n if new_instance:\n rt, unlock = load_yaml(rt_file, rt_lock, True)\n if len(rt):\n name_len = max(len(i) for i in rt.keys())\n inames = tuple(int(i) for i in rt.keys())\n for instance in count():\n if instance in inames:\n continue\n break\n else:\n instance = 0\n name_len = 1\n instance = str(instance)\n if len(instance) < name_len:\n instance = '0' * (name_len - len(instance)) + instance\n rt[instance] = {}\n unlock()\n save_yaml(rt, rt_file, rt_lock) # final confirm\n if instance_name:\n instance_dir = f'{instance}.{instance_name}'\n else:\n instance_dir = instance\n instance_dir = create_join(task_dir, instance_dir)\n config_dict_or_instance['results'] = {}\n sv_file, sv_lock = _sv_file_lock(instance_dir)\n save_yaml(config_dict_or_instance, sv_file, sv_lock)\n else:\n rt = load_yaml(rt_file, rt_lock)\n for instance_dir in listdir(task_dir):\n instance = instance_dir.split('.')[0]\n if instance.isdigit() and int(instance) == int(config_dict_or_instance):\n break\n instance = None\n assert instance in rt, f'instance {config_dict_or_instance} not registered.'\n instance_dir = create_join(task_dir, instance_dir)\n sv_file, sv_lock = _sv_file_lock(instance_dir)\n assert isfile(sv_file), f\"'{sv_file}' is not found.\"\n\n self._instance_dir = instance, instance_dir\n self._module = task_module\n self._ckpt_fname = join(instance_dir, 'checkpoint')\n self._model_dir = create_join(instance_dir, 'models')\n if not read_only:\n _, self._sv_unlock = load_yaml(sv_file, sv_lock, True)\n self._rt_file_lock = rt_file, rt_lock\n self._sv_file_lock = sv_file, sv_lock\n self._key = None\n self._writer = None\n self._keep_top_k = keep_top_k\n self._evalb = evalb\n self.log(datetime.now())\n\n def new_trial_recorder(self, specs_update_fn, trial):\n _, instance_dir = self._instance_dir\n specs = load_yaml(*self._sv_file_lock, wait_lock = False)\n results = specs.pop('results')\n trial_name = specs_update_fn(specs, trial)\n best_model = max(results, key = lambda x: results[x])\n child_recorder = Recorder(create_join(instance_dir, 'trials'), self._module, specs, trial_name, 1, self._evalb)\n _, child_dir = child_recorder._instance_dir\n self.log(f'New trial {child_dir} from best model {best_model}')\n copy_with_prefix_and_rename(join(instance_dir, 'models', best_model), child_dir, 'checkpoint')\n return child_recorder\n\n def summary_trials(self): # should only be a super_recorder\n if self._sv_unlock is None: # inactive recorder should only leave to the active one\n return False\n _, instance_dir = self._instance_dir\n children = load_yaml(*_rt_file_lock(instance_dir))\n best_child = max(children, key = lambda cid: children[cid]['key'])\n for fname in listdir(join(instance_dir, 'trials')):\n if '.' 
in fname:\n thatsit = fname.split('.')[0] == best_child\n else:\n thatsit = fname == best_child\n if thatsit:\n child_specs = load_yaml(*_sv_file_lock(join(instance_dir, 'trials', fname)))\n child_results = child_specs['results']\n best_model = max(child_results, key = lambda x: child_results[x])\n best_fpath = join(instance_dir, 'trials', fname, 'models', best_model)\n \n specs = load_yaml(*self._sv_file_lock, wait_lock = False)\n results = specs['results']\n results[best_model] = child_results[best_model]\n copy_with_prefix_and_rename(best_model, self._model_dir, best_model)\n \n weakest_model = min(results, key = lambda x: results[x])\n remove(join(self._model_dir, weakest_model))\n results.pop(weakest_model)\n\n self.log(' Replace the worst model', weakest_model, 'with the best model from trial', best_child, best_model)\n save_yaml(specs, *self._sv_file_lock, wait_lock = False)\n return True\n return False\n\n def detach(self):\n if self._sv_unlock is not None:\n self._sv_unlock()\n\n def delete_all(self):\n instance, instance_dir = self._instance_dir\n rt = load_yaml(*self._rt_file_lock)\n rt.pop(instance)\n save_yaml(rt, *self._rt_file_lock)\n rm_rf(instance_dir, stderr)\n\n def delete_most(self):\n instance, instance_dir = self._instance_dir\n remove(join(instance_dir, 'checkpoint'))\n with open(join(instance_dir, 'experiment.log'), 'a+') as fw:\n for fname in listdir(instance_dir):\n fpath = join(instance_dir, fname)\n if isdir(fpath):\n rm_rf(fpath, fw)\n\n def log(self, *args, **kwargs):\n _, instance_dir = self._instance_dir\n with open(join(instance_dir, 'experiment.log'), 'a+') as fw:\n kwargs['flush'] = True\n kwargs['file'] = fw\n print(*args, **kwargs)\n\n def init_tensorboard(self):\n try:\n from torch.utils.tensorboard import SummaryWriter\n except ImportError:\n from utils.shell_io import byte_style\n Recorder.msg(byte_style('(tensorboard is not installed; not tracking training statistics)', '3'))\n SummaryWriter = None\n if SummaryWriter is not None:\n self._writer = SummaryWriter(self.create_join('train'))\n\n def tensorboard(self, step, template, **kwargs):\n if self._writer is None:\n return\n for key, value in kwargs.items():\n if value is None: continue\n self._writer.add_scalar(template % key, value, step)\n\n def tensorboard_histogram(self, step, key, vector):\n if self._writer is None:\n return\n self._writer.add_histogram(key, vector, step)\n\n @staticmethod\n def msg(*args, **kwargs):\n print(*args, **kwargs, file = stderr, flush = True)\n\n def task_specs(self): # TODO if not training set trainset & develset to {}\n from utils.param_ops import HParams\n specs = load_yaml(*self._sv_file_lock, wait_lock = False)\n _, model_type, train_type = self._module.get_configs()\n model_config = get_obj_from_config(model_type, specs['model'])\n train_config = get_obj_from_config(train_type, specs['train'])\n train_config = HParams(train_config)\n return specs['data'], model_config, train_config, specs['results']\n\n def create_join(self, *args):\n _, instance_dir = self._instance_dir\n return create_join(instance_dir, *args)\n\n def initial_or_restore(self, model, optimizer = None, restore_from_best_validation = False):\n model_fname = None\n if not restore_from_best_validation and isfile(self._ckpt_fname):\n # if not set_vocab(vis_path, r_pu_su[0].py_vocabs, vocab_size):\n # recorder.set_resume_cleaner(lambda mj, mn: clean_epoch(vis_path, mj)) # no mn\n # self._path = vis_path\n # self._init = None\n # # self._pool = []\n\n # def list_func(self, *token):\n # if 
self._init is None or self._init == token:\n # if self._init is None:\n # clean_tree_heads(self._path)\n model_fname = self._ckpt_fname\n\n elif isdir(self._model_dir) or restore_from_best_validation:\n resutls = load_yaml(*self._sv_file_lock, wait_lock = False)['results']\n if resutls:\n best_model = max(resutls, key = lambda x: resutls[x])\n model_fname = join(self._model_dir, best_model)\n\n if model_fname is None:\n epoch = global_step = 0\n fine_validation = False\n md = dict(model.named_parameters())\n self.log(dict_print(zip_nt_params(md), v_to_str = lambda tensor: '*'.join(str(s) for s in tensor.shape)))\n total = 0\n for t in md.values():\n x = 1\n for s in t.shape:\n x *= s\n total += x\n self.log('Total:', total)\n else:\n checkpoint = torch.load(model_fname)\n try:\n model.load_state_dict(checkpoint['model_state_dict'])\n except:\n model_old_dict = checkpoint['model_state_dict']\n model_new_dict = model.state_dict()\n new_keys = tuple(model_new_dict)\n from utils.shell_io import byte_style\n for old_key in tuple(model_old_dict):\n if old_key in new_keys:\n continue\n new_candidates = {}\n old_segs = old_key.split('.')\n old_segs.reverse()\n for new_key in new_keys:\n new_segs = new_key.split('.')\n new_segs.reverse()\n if new_segs[0] != old_segs[0]:\n continue\n match_depth = 0\n for ns, os in zip(new_segs, old_segs):\n if ns == os:\n match_depth += 1\n if match_depth > 1:\n new_candidates[new_key] = match_depth\n new_candidates = sorted(new_candidates, key = new_candidates.get, reverse = True)\n if len(new_candidates) == 1:\n new_key = new_candidates[0]\n more = len(new_key) - len(old_key)\n prompt = byte_style('Rename ', '1') # red\n if more > 0:\n prompt += ' ' * more\n prompt += old_key\n prompt += byte_style('\\n as ', '2') # green\n else:\n more = 0 - more\n prompt += old_key\n prompt += byte_style('\\n as ', '2') # green\n prompt += ' ' * more\n prompt += new_key\n print(prompt)\n else:\n prompt = f'Change {old_key} into:\\n'\n for i, k in enumerate(new_candidates):\n prompt += f'{i}) {k}\\n'\n new_key = input(prompt)\n if new_key == 'q':\n exit()\n new_key = int(new_key)\n assert new_key in range(len(new_candidates))\n new_key = new_candidates[new_key]\n change_key(model_old_dict, old_key, new_key)\n model.load_state_dict(checkpoint['model_state_dict'])\n decision = input(f'Save change to {model_fname}? 
[Y]')\n if decision == 'Y':\n torch.save(checkpoint, model_fname)\n\n if optimizer is not None:\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n epoch, fine_validation, global_step = checkpoint['status']\n self._key = checkpoint['key']\n \n self.log(f\"Model restored from\", model_fname)\n Recorder.msg(f'Model Restored at {epoch:.2f}, key score {self._key:.2f}')\n if restore_from_best_validation:\n return epoch, global_step\n epoch = int(epoch)\n return epoch, fine_validation, global_step\n\n def check_betterment(self, epoch, falling, global_step, model, optimizer, key):\n if isnan(key):\n key = float('-inf')\n specs = load_yaml(*self._sv_file_lock, wait_lock = False)\n betterment = (self._key is None or self._key < key)\n in_top_k = any(old_key < key for old_key in specs['results'].values())\n fine_validation = falling and not betterment\n torch.save({'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'status': (epoch, fine_validation, global_step),\n 'key': key}, self._ckpt_fname)\n if betterment or in_top_k:\n if betterment:\n self._key = key\n model_fname = timestamp(epoch)\n copy_with_prefix_and_rename(self._ckpt_fname, self._model_dir, model_fname)\n specs['results'][model_fname] = key\n results = specs['results']\n if len(results) > self._keep_top_k:\n weakest_model = min(results, key = lambda x: results[x])\n remove(join(self._model_dir, weakest_model))\n results.pop(weakest_model)\n self.log(' Replace worst model', weakest_model, 'with a', 'new best' if betterment else 'better', 'model', model_fname)\n else:\n self.log(' A new', 'best' if betterment else 'better', 'model', model_fname)\n save_yaml(specs, *self._sv_file_lock, wait_lock = False)\n else:\n self.log()\n return betterment\n\n def register_test_scores(self, scores):\n instance, _ = self._instance_dir\n rt = load_yaml(*self._rt_file_lock)\n rt[instance] = scores\n save_yaml(rt, *self._rt_file_lock)\n\n @staticmethod\n def experiments_status(task_path):\n rt_file = join(task_path, _rt_file)\n rt_lock = join(task_path, _rt_lock)\n (instance_status, unlock), modifed = load_yaml(rt_file, rt_lock, True), False\n status = dict(locking = [], unlocked = [], other = [], tested = [])\n folders = listdir(task_path)\n\n name_len = 0\n instance_folders = []\n for fx in folders:\n if '.' 
in fx:\n sep = fx.index('.')\n instance = fx[:sep]\n exp_name = fx[sep+1:]\n else:\n instance = fx\n exp_name = None\n instance_path = join(task_path, fx)\n if isdir(instance_path):\n if instance in instance_status:\n name_len = max(name_len, len(instance))\n if isfile(join(instance_path, _sv_lock)):\n status['locking'].append(instance_path) # avoid ongoing experiments\n else:\n instance_folders.append((instance, exp_name, fx, instance_path))\n else:\n status['other'].append(instance_path)\n\n rename_list = []\n instance_folders.sort(key = lambda x: int(x[0]))\n for _cnt, (instance, exp_name, folder, fpath) in enumerate(instance_folders):\n _instance = str(_cnt)\n ap_zeros = name_len - len(_instance)\n _instance = '0' * ap_zeros + _instance\n modify = instance != _instance\n if modify:\n new_folder = f'{_instance}.{exp_name}' if exp_name else _instance\n new_fpath = join(task_path, new_folder)\n change_key(instance_status, instance, _instance)\n rename_list.append((fpath, new_fpath))\n fpath = new_fpath + '\\t<- ' + folder\n instance = _instance\n modifed = True\n key = instance_status[instance].get('key')\n if key:\n status['tested'].append(f'({key:.2f}) {fpath}')\n else:\n status['unlocked'].append(f'(?) {fpath}')\n\n unlock()\n if modifed:\n save_yaml(instance_status, rt_file, rt_lock)\n for fpath, new_fpath in rename_list:\n rename(fpath, new_fpath)\n return status\n\n @property\n def evalb(self):\n return self._evalb\n\n @property\n def key_score(self):\n return self._key\n\nfrom utils.param_ops import zip_nt_params, iter_zipped_nt_params\ndef get_obj_from_config(types, configs):\n # import pdb; pdb.set_trace()\n model_params = {}\n for k, vi, vj in iter_zipped_nt_params(types, configs):\n # if vi.is_valid(vj):\n # model_params[k] = vj\n # else:\n model_params[k] = vi[vj]\n return zip_nt_params(model_params)\n\ndef timestamp(main, prefix = 'M'):\n if isinstance(main, str):\n return float(main[1:])\n return f'{prefix}{main:.2f}'","sub_path":"utils/recorder.py","file_name":"recorder.py","file_ext":"py","file_size_in_byte":18290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"258623249","text":"#!/usr/bin/env python\n\"\"\"\nVery simple HTTP server in python.\nUsage::\n ./dummy-web-server.py []\nSend a GET request::\n curl http://localhost\nSend a HEAD request::\n curl -I http://localhost\nSend a POST request::\n curl -d \"foo=bar&bin=baz\" http://localhost\n\"\"\"\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport urllib\n\nfrom lib.LogClient import LogClient\nfrom heater.Heater import Interval\n\nlog = LogClient(\"simple_geo_server\")\n\n\nclass S(BaseHTTPRequestHandler):\n heater_interval = Interval(log)\n\n def _set_headers(self):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n\n def do_GET(self):\n self._set_headers()\n log.info('url path: ' + self.path)\n if self.path == '/favicon.ico':\n return\n\n if self.path == '/heater':\n heater_set = S.heater_interval.heater_set()\n next_moment = S.heater_interval.check_when()\n\n if not heater_set:\n log.info(\"heater not set, next moment to check: %s\" % next_moment)\n self.wfile.write(bytes(\"no|%s,0\" % next_moment, 'UTF-8'))\n\n if next_moment < 30000:\n duration = S.heater_interval.heat_duration()\n log.info(\"heater time! time off: %s, duration: %s\" % (next_moment, duration))\n self.wfile.write(bytes(\"yes|0,%s\" % duration, 'UTF-8'))\n else:\n log.info(\"heater set, but not yet time to heat. 
time left: %s\" % next_moment)\n self.wfile.write(bytes(\"no|%s,0\" % next_moment, 'UTF-8'))\n\n log.info('heater: request')\n else:\n url_split = urllib.parse.urlsplit(self.path)\n args = urllib.parse.parse_qs(url_split.query)\n log.info(\"geo: %s\" % args)\n print(args)\n self.wfile.write(bytes(\"
<html><body>hi!</body></html>
\", 'UTF-8'))\n\n def do_HEAD(self):\n self.send_response(200)\n self.send_header('Content-type', 'text/plain')\n self.end_headers()\n\n def do_POST(self):\n # Doesn't do anything with posted data\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n self._set_headers()\n print(\"POST: \" + post_data)\n self.wfile.write(\"
<html><body>POST!\" + post_data + \"</body></html>
\")\n\n\ndef run(server_class=HTTPServer, handler_class=S, port=8060):\n server_address = ('', port)\n httpd = server_class(server_address, handler_class)\n print('Starting httpd...')\n httpd.serve_forever()\n\n\nif __name__ == \"__main__\":\n from sys import argv\n\n if len(argv) == 2:\n run(port=int(argv[1]))\n else:\n run()\n\n","sub_path":"bin/simple_geo_server.py","file_name":"simple_geo_server.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"507472228","text":"import cv2\nimport sys\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\neye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')\ncap = cv2.VideoCapture(sys.argv[1]) \ng1=sys.argv[1]\ng2=g1[-5:-4]\ng=int(g2)\ni = (g-1)*500 + 1\n#i=1\n#cap=cv2.VideoCapture('c6.MP4')\nflag = False\ncount=0\nwhile (cap.isOpened()):\n\tif count%100==0:\n\t\tret, img = cap.read()\n\t\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\t\tfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\n\t\tfor(x,y,w,h) in faces:\n\t\t\tcv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2)\n\t\t\troi_gray = gray[y:y+h, x:x+w]\n\t\t\troi_color = img[y:y+h, x:x+w]\n\t\t\t#if not flag:\n\t\t\timgpath='c_final/'+str(i)+'.jpg'\n\t\t\tcv2.imwrite(imgpath , roi_color )\n\t\t\t#eyes = eye_cascade.detectMultiScale(roi_gray,scaleFactor=1.3,minNeighbors=5,minSize=(30, 30))\n\t\t\t#for(ex,ey,ew,eh) in eyes:\n\t\t\t#\tcv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,0,255),2)\n\t\t\ti=i+1\n\t\t\tif i%500==1:\n\t\t\t\tbreak\n\tcount = count+1\n\tcv2.imshow('img',img)\n\tif (cv2.waitKey(1) & 0xFF == ord('q')) or (i%500==1):\n \tbreak\n\ncap.release()\ncv2.destroyAllWindows()\n\n","sub_path":"face_eye_detect.py","file_name":"face_eye_detect.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"366899953","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nFlamingo DB build script\n\"\"\"\n\nfrom __future__ import print_function\n\n__author__ = 'Sandeep Murthy, Marek Dabek'\n\n__all__ = [\n 'archive_script',\n 'build_flamingo_db',\n 'generate_build_sql_script',\n 'parse_args',\n '_replace_in_file',\n '_unified_diff'\n]\n\nimport argparse\nimport difflib\nimport os\nimport subprocess\nimport sys\nimport time\n\nfrom subprocess import (\n check_call,\n check_output,\n)\n\ncwd = os.getcwd()\nsys.path.insert(0, cwd)\n\nsource_sql_script = os.path.join(cwd, 'schema.sql')\nbuild_sql_script = os.path.join(cwd, 'schema_run.sql')\n\n\ndef parse_args():\n \"\"\"\n Parses script arguments and constructs an args dictionary.\n \"\"\"\n\n parser = argparse.ArgumentParser(description='Build Flamingo database.')\n \n parser.add_argument(\n '-s', '--sql_server_ip', type=str, required=True,\n help=\"The SQL Server IP.\")\n \n parser.add_argument(\n '-p', '--sa_password', type=str, required=True,\n help=\"The SQL Server sa password.\")\n \n parser.add_argument(\n '-n', '--environment_name', type=str, required=True,\n help=\"The environment name.\")\n \n parser.add_argument(\n '-l', '--login_password', type=str, required=True,\n help='The SQL login password.')\n \n parser.add_argument(\n '-f', '--file_location_sql_server', type=str, required=True,\n help='The file location on the SQL server - enter path using forward slashes for the file separator.')\n \n parser.add_argument(\n '-F', '--file_location_shiny', type=str, required=True,\n help='The file location on the 
shiny server - enter path using forward slashes for the file separator.')\n    \n    parser.add_argument(\n        '-v', '--version', type=str, required=True,\n        help='The database version.')\n    \n    parser.add_argument('--mock', dest='mock', action='store_true', help='Mock the SQL command to build the DB')\n    parser.add_argument('--no-mock', dest='mock', action='store_false', help='Do not mock the SQL command to build the DB')\n    parser.set_defaults(mock=False)\n\n    parser.add_argument('--archive', dest='archive', action='store_true', help='Archive the generated build SQL script after the DB build')\n    parser.add_argument('--no-archive', dest='archive', action='store_false', help='Delete the generated build SQL script after the DB build')\n    parser.set_defaults(archive=True)\n\n    args = parser.parse_args()\n\n    args_dict = vars(args)\n    args_dict['file_location_sql_server'] = r''.join(args_dict['file_location_sql_server'].replace('/', '\\\\').replace(' ', '\\ '))\n    args_dict['file_location_shiny'] = r''.join(args_dict['file_location_shiny'])\n    args_dict['user_password'] = args_dict['login_password']\n\n    return args_dict\n\n\ndef _replace_in_file(source_file_path, target_file_path, var_names, var_values):\n    \"\"\"\n    Replaces a list of placeholders / variable names in a source file with a\n    matching set of values, and writes it out to a new target file.\n    \"\"\"\n    if len(var_names) != len(var_values):\n        raise Exception('Number of variable names does not equal the number of variable values to replace - please check and try again.')\n\n    try:\n        with open(source_file_path, 'r') as f:\n            lines = f.readlines()\n\n        with open(target_file_path, 'w') as f:\n            for i in range(len(lines)):\n                outline = inline = lines[i]\n                present_var_names = [var_name for var_name in var_names if var_name in inline]\n                if present_var_names:\n                    for var_name in present_var_names:\n                        var_value = var_values[var_names.index(var_name)]\n                        outline = outline.replace(var_name, var_value)\n                f.write(outline)\n    except (OSError, IOError) as e:\n        raise e\n\n\ndef _unified_diff(file1, file2, as_string=False):\n    \"\"\"\n    Generates a unified diff of two files: ``file1`` and ``file2``. The files must\n    be passed in as absolute paths.\n    \"\"\"\n\n    try:\n        with open(file1, 'r') as f1:\n            with open(file2, 'r') as f2:\n                diff = difflib.unified_diff(\n                    f1.readlines(),\n                    f2.readlines(),\n                    fromfile=file1,\n                    tofile=file2,\n                )\n    except (OSError, IOError) as e:\n        raise e\n    else:\n        if as_string:\n            return ''.join(diff)\n        return diff\n\n\ndef generate_build_sql_script(args, source_sql_script, build_sql_script):\n    \"\"\"\n    Generates the DB build SQL script using the args dictionary by reading in\n    the source SQL script and replacing a fixed set of variable names (or\n    placeholders) with values from the create_db.py script arguments.\n    \"\"\"\n\n    var_names = [\n        '%ENVIRONMENT_NAME%',\n        '%USER_PASSWORD%',\n        '%VERSION%',\n        '%FILE_LOCATION_SQL_SERVER%',\n        '%FILE_LOCATION_SHINY%'\n    ]\n\n    var_values = [\n        args[var_name.strip('%').lower()] if var_name else None for var_name in var_names\n    ]\n\n    _replace_in_file(source_sql_script, build_sql_script, var_names, var_values)\n\n\ndef build_flamingo_db(args, build_sql_script, mock=False):\n    \"\"\"\n    Runs the SQL command to build the Flamingo DB using the args dictionary\n    and the generated build SQL script, and returns the sqlcmd command string\n    that was used. 
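(Note: the returned string embeds the sa password passed on the command line, so treat it as sensitive if it is ever logged.) 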
Can be mocked by setting the optional `mock` parameter to `True`.\n \"\"\"\n\n sqlcmd_str = 'sqlcmd -S {} -d master -U sa -P {} -i {}'.format(\n args['sql_server_ip'], args['sa_password'], build_sql_script)\n \n if not mock:\n check_call(sqlcmd_str.split(' '))\n\n return sqlcmd_str\n\n\ndef archive_script(build_sql_script):\n \"\"\"\n Archives the generated build SQL script in the working directory using a timestamp.\n \"\"\"\n\n timestamp_str = time.strftime('%d-%m-%Y_%H:%M:%S_GMT', time.gmtime())\n archived_script = '{}.{}'.format(build_sql_script, timestamp_str)\n os.rename(build_sql_script, archived_script)\n\n return archived_script\n\n\nif __name__ == '__main__':\n\n try:\n print('\\nParsing arguments: ', end='')\n args = parse_args()\n print(args)\n time.sleep(3)\n\n print('\\nGenerating build SQL script {} from source SQL script {}'.format(build_sql_script, source_sql_script))\n generate_build_sql_script(args, source_sql_script, build_sql_script)\n time.sleep(3)\n script_diff = _unified_diff(source_sql_script, build_sql_script, as_string=True)\n print('\\nDiff of source script -> build script: {}'.format(script_diff))\n time.sleep(5)\n\n print('\\nBuilding Flamingo DB: ' if not args['mock'] else '\\nMocking Flamingo DB build: ', end='')\n build_cmd = build_flamingo_db(args, build_sql_script, mock=args['mock'])\n print('command used \"{}\"'.format(build_cmd))\n time.sleep(3)\n\n if args['archive']:\n print('\\nArchiving build SQL script: ', end='')\n archived_script = archive_script(build_sql_script)\n print('archived as \"{}\"'.format(archived_script))\n else:\n print('\\nDeleting build SQL script')\n os.remove(build_sql_script)\n time.sleep(3)\n except Exception as e:\n print(str(e))\n sys.exit(1)\n else:\n if not args['mock']:\n print('\\nScript executed successfully - exiting.')\n time.sleep(3)\n sys.exit(0)\n","sub_path":"SQLFiles/create_db.py","file_name":"create_db.py","file_ext":"py","file_size_in_byte":7448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"426940740","text":"from collections import deque\n\n\ndef run(player_count, last_marble_value):\n circle = [0]\n marble_num = 1\n current_marble_index = 0\n scores = [0 for _ in range(player_count)]\n while marble_num < last_marble_value:\n for elf in range(player_count):\n if marble_num > last_marble_value:\n break\n if marble_num % 23 != 0:\n current_marble_index = (current_marble_index + 1) % len(circle) + 1\n circle.insert(current_marble_index, marble_num)\n else:\n current_marble_index -= 7\n if current_marble_index < 0:\n current_marble_index += len(circle)\n\n removed = circle.pop(current_marble_index)\n scores[elf] += marble_num + removed\n marble_num += 1\n return max(scores)\n\n\ndef run_polished(player_count: int, last_marble_value: int) -> int:\n circle = deque([0])\n scores = [0 for _ in range(player_count)]\n for marble_num in range(1, last_marble_value + 1):\n if marble_num % 23 == 0:\n circle.rotate(7)\n scores[(marble_num % player_count) - 1] += marble_num + circle.pop()\n circle.rotate(-1)\n else:\n circle.rotate(-1)\n circle.append(marble_num)\n return max(scores)\n\n\ndef main():\n input_txt, _ = helpers.load_input(9, \"Marble Mania\")\n\n split = input_txt.split()\n player_count = int(split[0])\n last_marble_value = int(split[6])\n\n print(f\"part1: {run(player_count, last_marble_value)}\")\n print(f\"part2: {run_polished(player_count, 100 * last_marble_value)}\")\n\n\nif __name__ == \"__main__\":\n import helpers\n 
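# NOTE: 'helpers' is assumed to be a project-local module (it is not a published\n    # package); main() relies on it exposing load_input(day, title), which returns the\n    # raw puzzle input text plus a second value that main() discards.\n    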
main()\n","sub_path":"day09.py","file_name":"day09.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"653885143","text":"# pages/views.py\n# from django.views.generic import TemplateView\nfrom django.shortcuts import render, redirect\nfrom django.views.generic import ListView, CreateView\nfrom .models import Book, Order, OrderItem, Review\nfrom django.db.models import Q\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nimport decimal as decimal\n\n\ndef home(request):\n return render(request, 'home.html', {'title': 'Home'})\n\n\ndef about(request):\n return render(request, 'about.html', {'title': 'About'})\n\n\ndef browse(request):\n context = {}\n query = \"\"\n if request.GET:\n query = request.GET['q']\n context['query'] = str(query)\n context = {\n 'title': 'Browse',\n 'books': get_queryset(query),\n }\n return render(request, 'browse.html', context)\n\n\n@login_required\ndef add_to_cart(request, isbn):\n item = Book.objects.filter(isbn=isbn).first()\n price = decimal.Decimal(item.price)\n order, created = Order.objects.get_or_create(user=request.user,\n Order_Value=0)\n orderitem, created = OrderItem.objects.get_or_create(item=item,\n Item_Price=price,\n order=order)\n order.items.add(orderitem)\n order.save()\n messages.success(request, 'Cart Updated!')\n return redirect('pages-browse')\n\n\n@login_required\ndef delete_from_cart(request, pk):\n item_to_delete = OrderItem.objects.filter(ItemNumber=pk)\n if item_to_delete.exists():\n item_to_delete[0].delete()\n messages.info(request, \"Item has been deleted\")\n return redirect('order')\n\n\nclass BookListView(ListView):\n model = Book\n template_name = 'browse.html'\n context_object_name = 'books'\n ordering = ['title']\n\n\ndef bookdetail(request, pk):\n book = Book.objects.filter(isbn=pk).first()\n r = list(Review.objects.filter(belongs=book))\n context = {\n 'title': pk,\n 'object': book,\n 'reviews': r\n }\n return render(request, 'book_detail.html', context)\n\n\nclass ReviewCreateView(CreateView):\n model = Review\n template_name = 'review.html'\n fields = ['title', 'text']\n\n def form_valid(self, form):\n form.instance.author = self.request.user\n return super().form_valid(form)\n\n\ndef order(request):\n o = Order.objects.filter(user=request.user)\n o = list(o.first().items.all())\n context = {\n 'title': 'Order',\n 'items': o\n }\n\n return render(request, 'order_detail.html', context)\n\n\ndef get_queryset(query=None):\n queryset = []\n queries = query.split(\" \")\n for q in queries:\n results = Book.objects.filter(\n Q(title__icontains=q) |\n Q(isbn__icontains=q)\n ).distinct()\n for b in results:\n queryset.append(b)\n\n return list(set(queryset))\n\n\ndef complete(request):\n return render(request, 'complete.html', {'title': 'Complete'})\n","sub_path":"pages/pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"15419717","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom .forms import BuscarViajeForm\nfrom django.db.models import Q,F\nfrom django.db.models.expressions import RawSQL\nfrom Nucleo.models import Viaje\nfrom django.http import HttpResponseRedirect\nfrom django.views.generic import ListView\nclass BuscarList(ListView):\n model = Viaje\n template_name = \"classroom/buscar/listarbusqueda.html\"\n def 
get_queryset(self):\n origen =self.request.GET.get('origen')\n destino = self.request.GET.get('destino')\n fecha = self.request.GET.get('fecha')\n print(fecha)\n viajes = Viaje.objects.all().filter(tiempo_inicio__date=fecha, realizado=False)\n borrar = []\n for v in viajes:\n if not v.tramos.filter(ciudad_origen=origen).exists() and not v.tramos.filter(ciudad_destino=destino).exists():\n borrar.append(v.pk)\n for pk in borrar:\n viajes=viajes.exclude(pk=pk)\n\n return viajes.filter(tramos__ciudad_origen=origen).annotate(lahora = F('tramos__hora_inicio')).annotate(\n mi_origen=RawSQL(\"SELECT '%s'\"%(origen,), ())).annotate(mi_destino=RawSQL(\"SELECT '%s'\"%(destino,), ()))\n\n\ndef Buscar(request):\n form = BuscarViajeForm(request.POST)\n if request.method == \"POST\" and form.is_valid():\n origen = form.cleaned_data['ciudad_origen']\n destino = form.cleaned_data['ciudad_destino']\n fecha = form.cleaned_data['fecha']\n viajes = Viaje.objects.all()\n existio = False\n\n for v in viajes:\n if v.tramos.filter(Q(ciudad_origen=origen)|Q(ciudad_destino=destino)).count()==2:\n existio=True\n break\n return HttpResponseRedirect('/buscar/listar?origen=%s&destino=%s&fecha=%s'%(origen,destino,fecha))\n else:\n form = BuscarViajeForm()\n return render(request,'classroom/buscar/buscar.html',{'form':form})","sub_path":"BlaBlauto/Busqueda/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"264613162","text":"from decimal import Decimal\nimport logging\nfrom bs4 import BeautifulSoup\nfrom storescraper.categories import CASE_FAN, COMPUTER_CASE, GAMING_CHAIR, \\\n HEADPHONES, MONITOR, MOTHERBOARD, MOUSE, NOTEBOOK, POWER_SUPPLY, PRINTER, \\\n PROCESSOR, SOLID_STATE_DRIVE, USB_FLASH_DRIVE, VIDEO_CARD\nfrom storescraper.product import Product\nfrom storescraper.store import Store\nfrom storescraper.utils import remove_words, session_with_proxy\n\n\nclass PcPart(Store):\n @classmethod\n def categories(cls):\n return [\n HEADPHONES,\n NOTEBOOK,\n SOLID_STATE_DRIVE,\n POWER_SUPPLY,\n COMPUTER_CASE,\n PRINTER,\n USB_FLASH_DRIVE,\n MONITOR,\n CASE_FAN,\n MOUSE,\n MOTHERBOARD,\n PROCESSOR,\n VIDEO_CARD,\n GAMING_CHAIR,\n ]\n\n @classmethod\n def discover_urls_for_category(cls, category, extra_args=None):\n url_extensions = [\n ['1', HEADPHONES],\n ['6', MONITOR],\n ['8', CASE_FAN],\n ['9', MOUSE],\n ['10', MOTHERBOARD],\n ['11', PROCESSOR],\n ['12', USB_FLASH_DRIVE],\n ['13', SOLID_STATE_DRIVE],\n ['14', COMPUTER_CASE],\n ['15', POWER_SUPPLY],\n ['16', VIDEO_CARD],\n ['17', NOTEBOOK],\n ['21', GAMING_CHAIR],\n ['27', PRINTER],\n ]\n\n session = session_with_proxy(extra_args)\n product_urls = []\n for url_extension, local_category in url_extensions:\n if local_category != category:\n continue\n\n url_webpage = 'https://pcpart.cl/php/categories/productos.php'\n data = session.post(url_webpage, data={\n 'categorias': url_extension}).text\n soup = BeautifulSoup(data, 'html.parser')\n product_containers = soup.findAll('div', 'product-item')\n if not product_containers:\n logging.warning('Empty category: ' + local_category)\n for container in product_containers:\n product_url = container.find('a')['href']\n product_urls.append('https://pcpart.cl/' + product_url)\n return product_urls\n\n @classmethod\n def products_for_url(cls, url, category=None, extra_args=None):\n print(url)\n session = session_with_proxy(extra_args)\n response = session.get(url)\n soup = BeautifulSoup(response.text, 
'html.parser')\n\n key = url.split('id=')[-1]\n\n details_items = soup.find('div', 'details-items')\n name = details_items.find('h2').text\n\n offer_price = Decimal(remove_words(details_items.find(\n 'h3', 'price-detail').text.split('Efectivo')[0]))\n normal_price = Decimal(remove_words(details_items.find(\n 'h4', 'subprice-detail').text.split('Otros')[0]))\n model = details_items.find('li', 'd-block').contents[2].strip()\n stock = int(details_items.find('h3', 'hurry-title').find('span').text)\n\n picture_urls = []\n picture_container = details_items.find(\n 'div', {'id': 'imagenes-galeria'})\n for i in picture_container.findAll('img'):\n picture_urls.append(i['src'])\n\n p = Product(\n name,\n cls.__name__,\n category,\n url,\n url,\n key,\n stock,\n normal_price,\n offer_price,\n 'CLP',\n sku=key,\n part_number=model,\n picture_urls=picture_urls,\n )\n return [p]\n","sub_path":"storescraper/stores/pc_part.py","file_name":"pc_part.py","file_ext":"py","file_size_in_byte":3630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"154639101","text":"#! /usr/bin/env python3\n\n__all__ = [\n 'FrameCorners',\n 'CornerStorage',\n 'build',\n 'dump',\n 'load',\n 'draw',\n 'without_short_tracks'\n]\n\nimport click\nimport cv2\nimport numpy as np\nimport pims\nimport _corners\n\nfrom _corners import FrameCorners, CornerStorage, StorageImpl\nfrom _corners import dump, load, draw, without_short_tracks, create_cli\n\n\nclass _CornerStorageBuilder:\n\n def __init__(self, progress_indicator=None):\n self._progress_indicator = progress_indicator\n self._corners = dict()\n\n def set_corners_at_frame(self, frame, corners):\n self._corners[frame] = corners\n if self._progress_indicator is not None:\n self._progress_indicator.update(1)\n\n def build_corner_storage(self):\n return StorageImpl(item[1] for item in sorted(self._corners.items()))\n\n\ndef _to256(img):\n return np.array(img * 256, dtype=np.uint8)\n\n\nclass _CornerTracker:\n\n def __init__(self):\n self.n_corners = 300\n self.circle_size = 4\n self.safe_area_radius = 8\n self.image = None\n self.corners = None\n self.safe_area_delta = [(i, j) for i in range(-self.safe_area_radius, self.safe_area_radius + 1) for j in\n range(-self.safe_area_radius, self.safe_area_radius + 1)\n if i ** 2 + j ** 2 < self.safe_area_radius ** 2]\n self.mask = None\n self.flow_params = dict(winSize=(15, 15),\n maxLevel=2,\n criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.3),\n minEigThreshold=0.005)\n self.corners_params = dict(maxCorners=self.n_corners,\n qualityLevel=0.05,\n minDistance=self.safe_area_radius,\n blockSize=10,\n gradientSize=1,\n mask=None)\n\n def update_mask(self):\n def is_bounded(x, y):\n return 0 <= x < self.image.shape[1] and 0 <= y < self.image.shape[0]\n self.mask = np.ones((self.image.shape[1], self.image.shape[0]), dtype=np.uint8)\n for point in np.array(self.corners.points, dtype=np.int32):\n for d in self.safe_area_delta:\n neighbour = point + d\n if is_bounded(*neighbour):\n self.mask[tuple(neighbour)] = 0\n\n def add_new_corners(self, new_corner_points):\n if new_corner_points is None:\n return\n new_corner_points = np.array(new_corner_points, dtype=np.int32).reshape(-1, 2)\n if self.corners is None:\n self.corners = FrameCorners(np.array(range(new_corner_points.shape[0])),\n new_corner_points,\n np.array([self.circle_size] * new_corner_points.shape[0]))\n return\n self.corners.add_corners(new_corner_points, self.circle_size)\n\n def update_image(self, new_image):\n if 
self.image is None:\n self.image = new_image\n self.add_new_corners(cv2.goodFeaturesToTrack(new_image, **self.corners_params))\n else:\n if self.corners.ids.shape[0] != 0:\n updated_corner_points, status, _ = cv2.calcOpticalFlowPyrLK(_to256(self.image),\n _to256(new_image),\n np.array(self.corners.points,\n dtype=np.float32).reshape(-1, 2),\n None, **self.flow_params)\n status = np.array(status, dtype=np.bool).reshape(-1)\n self.corners = _corners.filter_frame_corners(self.corners, status) # remove non-tracked points\n self.corners._points = np.array(updated_corner_points)[status] # update coordinates\n self.corners_params['maxCorners'] = self.n_corners - self.corners.points.shape[0]\n if self.corners_params['maxCorners'] > 0:\n self.update_mask()\n self.corners_params['mask'] = self.mask.transpose()\n self.add_new_corners(cv2.goodFeaturesToTrack(new_image, **self.corners_params))\n self.image = new_image\n return self.corners\n\n\ndef _build_impl(frame_sequence: pims.FramesSequence,\n builder: _CornerStorageBuilder) -> None:\n tracker = _CornerTracker()\n for frame, image in enumerate(frame_sequence):\n corners = tracker.update_image(image)\n builder.set_corners_at_frame(frame, corners)\n\n\ndef build(frame_sequence: pims.FramesSequence,\n progress: bool = True) -> CornerStorage:\n \"\"\"\n Build corners for all frames of a frame sequence.\n\n :param frame_sequence: grayscale float32 frame sequence.\n :param progress: enable/disable building progress bar.\n :return: corners for all frames of given sequence.\n \"\"\"\n if progress:\n with click.progressbar(length=len(frame_sequence),\n label='Calculating corners') as progress_bar:\n builder = _CornerStorageBuilder(progress_bar)\n _build_impl(frame_sequence, builder)\n else:\n builder = _CornerStorageBuilder()\n _build_impl(frame_sequence, builder)\n return builder.build_corner_storage()\n\n\nif __name__ == '__main__':\n create_cli(build)() # pylint:disable=no-value-for-parameter\n","sub_path":"camtrack/corners.py","file_name":"corners.py","file_ext":"py","file_size_in_byte":5632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"120925508","text":"from math import log, sqrt\ndef li(x):\n\tR = sqrt(x)\n\tg = 0.577215664901532\n\tp = -2.0\n\tS = g + log(log(x))\n\tfor n in range(1, 16):\n\t\tp *= -0.5*log(x)/n\n\t\tI = 0\n\t\tfor k in range((n-1)//2 + 1):\n\t\t\tI += 1/(2*k+1)\n\t\t#print(\"p = {}, I = {}\".format(p, I))\n\t\tS += R*p*I\n\treturn(S)\n","sub_path":"2018/students/lili.py","file_name":"lili.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"283834409","text":"from flask import Flask,Blueprint\nfrom bson.json_util import dumps\nfrom bson.objectid import ObjectId\nfrom flask import jsonify,request\nfrom .extentions import mongo\n# from werkzeug.security import generate_password_hash,check_password_hash\n\n\n# app.secret_key=\"keykey\"\n# app.config['MONGO_URI'] = \"mongodb://localhost:27017/bookstore\"\n# mongo = PyMongo(app)\n\nbk = Blueprint('bk',__name__)\n\n@bk.route(\"/addbook\",methods=[\"POST\"])\ndef add_book():\n _json = request.json\n _bookname = _json['bookname']\n _author = _json['author']\n\n if _bookname and _author and request.method == 'POST':\n mongo.db.booktable.insert({'bookname':_bookname,'author':_author})\n resp = jsonify(\"book added successfully\"),200\n return resp\n else:\n return not_found()\n\n@bk.route('/booklist',methods=['GET'])\ndef booklist():\n 
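# 'bookname' and 'author' are optional query-string filters; when neither is\n    # supplied, the endpoint falls through to dumping the whole 'booktable' collection.\n    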
bookname = request.args.get('bookname')\n    author = request.args.get('author')\n    if author or bookname:\n        output = []\n        for q in mongo.db.booktable.find({\"$or\":[{'bookname':bookname},{'author':author}]}): \n            output.append({'bookname' : q['bookname'], 'author' : q['author']})\n        if len(output)!=0 :\n            return jsonify({'result' : output})\n        else:\n            return jsonify({'result' : 'No Results Found'})\n\n    else:\n        book = mongo.db.booktable.find()\n        resp = dumps(book)\n        return resp\n\n\n@bk.route('/book/<id>',methods=['GET'])\ndef find_book(id):\n    book = mongo.db.booktable.find_one({'_id':ObjectId(id)})\n    resp = dumps(book)\n    return resp\n\n\n@bk.route('/update/<id>',methods=['PUT'])\ndef update(id):\n    _id=id\n    _json = request.json\n    _bookname = _json['bookname']\n    _author = _json['author']\n\n    if _bookname and _author and request.method == 'PUT':\n        mongo.db.booktable.update_one({'_id': ObjectId(_id['$oid']) if '$oid' in _id else ObjectId(_id)},\n        {'$set': {'bookname': _bookname, 'author': _author}}) \n\n        resp = jsonify(\"Updated successfully\"),200\n        return resp\n    else:\n        return not_found()\n\n@bk.route('/delete/<id>',methods=['DELETE'])\ndef delete(id):\n    mongo.db.booktable.delete_one({'_id':ObjectId(id)})\n    resp = jsonify(\"deleted successfully\"),200\n    return resp\n\n@bk.errorhandler(404)\ndef not_found(error=None):\n    message = {\n        'status':404,\n        'message':'not found: '+request.url\n    }\n    resp=jsonify(message)\n    resp.status_code=404\n    return resp\n\n","sub_path":"book/bookstore.py","file_name":"bookstore.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}\n{"seq_id":"450800739","text":"import responses\nimport os.path\nimport re\nfrom pprint import pprint\nfrom . import phdcomics\n\n\n@responses.activate\ndef test_crawler():\n    def file_callback(request):\n        if request.url == \"http://phdcomics.com/comics/archive.php?comicid=1919\":\n            filename = \"phdcomics_test_yesterday.html\"\n        elif request.url == \"http://phdcomics.com/comics/archive.php?comicid=1918\":\n            filename = \"phdcomics_test_before_yesterday.html\"\n        elif request.url == \"http://phdcomics.com/\":\n            filename = \"phdcomics_test_today.html\"\n        else:\n            print(request.url)\n            return 404, [], ''\n\n        basepath = os.path.dirname(os.path.abspath(__file__))\n        with open(os.path.join(basepath, filename)) as f:\n            return 200, [], f.read()\n\n    responses.add_callback(responses.GET,\n                           re.compile('http://phdcomics\\.com/.*'),\n                           callback=file_callback)\n\n    p = phdcomics.PHDComics()\n    results = p.crawl(depth=3)\n\n    assert len(results) == 3\n\n    # for i, episode in enumerate(results):\n    #     print('''assert results[{i}].name == '{episode.name}' '''.format(episode=episode, i=i))\n    #     print('''assert results[{i}].series == '{episode.series}' '''.format(episode=episode, i=i))\n    #     print('''assert results[{i}].image_url == '{episode.image_url}' '''.format(episode=episode, i=i))\n    #     print()\n\n    assert results[0].name == 'The new busy'\n    assert results[0].series == 'PHD Comics'\n    assert results[0].image_url == 'http://www.phdcomics.com/comics/archive/phd020117s.gif'\n\n    assert results[1].name == 'How I Write'\n    assert results[1].series == 'PHD Comics'\n    assert results[1].image_url == 'http://www.phdcomics.com/comics/archive/phd012717s.gif'\n\n    assert results[2].name == 'Rihanna'\n    assert results[2].series == 'PHD Comics'\n    assert results[2].image_url == 
'http://www.phdcomics.com/comics/archive/phd012517s.gif'\n","sub_path":"komiksowiec/comics/phdcomics_test.py","file_name":"phdcomics_test.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}\n{"seq_id":"363442762","text":"\"\"\"Exercise 6: Using Objects\"\"\"\r\n\r\nclass Person:\r\n    def __init__(self, name, age):\r\n        self.name = name\r\n        self.age = age \r\n\r\n    def introduce_self(self):\r\n        print(f\"I am {self.name} and I am {self.age} years old\")\r\n\r\n    def compare_to(self, other_person):\r\n        if self.age > other_person.age:\r\n            print(f'I am {self.age - other_person.age} years older than '\r\n                  f'{other_person.name}.')\r\n        elif self.age < other_person.age:\r\n            print(f'I am {other_person.age - self.age} years younger than '\r\n                  f'{other_person.name}.')\r\n        else:\r\n            print(f'I am the same age as {other_person.name}.')\r\n\r\nif __name__ == '__main__':\r\n    person1 = Person('Amy', 31)\r\n    person2 = Person('Mike', 25)\r\n\r\n    person1.introduce_self()\r\n    person2.introduce_self()\r\n    person1.compare_to(person2)\r\n    person2.compare_to(person1)","sub_path":"labs/SPD-2.3-VSCode-Debugger-Lab/exercise-6.py","file_name":"exercise-6.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}\n{"seq_id":"316227359","text":"from singa import layer\nfrom singa import metric\nfrom singa import loss\nfrom singa import device\nfrom singa import net as ffnet\nfrom singa import tensor\nimport numpy as np\nimport data\n\ndef create(distinct_code_count, code_embed_size, demo_feature_count,visit_embed_size, use_cpu=True):\n    if use_cpu:\n        layer.engine = 'singacpp'\n\n    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Recall(top_k=100))\n    wdense = layer.Dense('code_dense', code_embed_size, input_sample_shape=(distinct_code_count,))\n    net.add(wdense)\n    code_relu = layer.Activation('Code_RELU')\n    net.add(code_relu)\n    # the dummy layer needs to be set up explicitly\n    demo_dummy = layer.Dummy(\"demo\")\n    demo_dummy.setup((demo_feature_count, ))\n    net.add(demo_dummy, src = [])\n    net.add(layer.Concat('visit_concat', 1, [(code_embed_size, ),(demo_feature_count, )]),src =[code_relu, demo_dummy])\n    net.add(layer.Dense('visit_dense', visit_embed_size))\n    net.add(layer.Activation('visit_RELU'))\n    net.add(layer.Dense('output_dense', distinct_code_count))\n\n# Init the weights uniformly in [-1, 1] and init biases to 0\n    for (p, name) in zip(net.param_values(), net.param_names()):\n        # print name, p.shape\n        if \"weight\" in name:\n            p.uniform(-1,1)\n        elif \"bias\" in name:\n            p.set_value(0)\n\n    return net, wdense.param_values()[0]\n\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}\n{"seq_id":"245272116","text":"import json\n\nimport requests\nfrom django.shortcuts import render\n\nfrom rest_framework import viewsets, status\n\n# Application Import\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom . import models\nfrom . 
import serializers\n\n# Create your views here\n# string template\nfrom string import Template\n\n\nclass UserDetailViewset(APIView):\n    \"\"\"\n    List all user details, or handle incoming Dialogflow webhook requests.\n    \"\"\"\n    def get(self, request, format=None):\n        print(\"get\",request.data)\n        snippets = models.UserDeatils.objects.all()\n        serializer = serializers.UserDetailSerializers(snippets, many=True)\n        return Response(serializer.data)\n\n    def post(self, request, format=None):\n        res=\"error\"\n\n        user_password=request.data['result']['parameters']\n        intent_name=request.data['result']['metadata']['intentName']\n\n        headers = {'Content-Type': 'application/json'}\n\n        if intent_name ==\"Activecollab\":\n            r = requests.post(\"https://my.activecollab.com/api/v1/external/login\", headers=headers,\n                              data=json.dumps(user_password))\n\n            if r.status_code == 200:\n                next_request={\"client_vendor\":\"zencode\",\"client_name\":\"hi\"}\n                # print(\"call success, the response is given below \", r.json())\n                response = r.json()\n                # print(response[\"user\"][\"intent\"])\n                res=response[\"user\"][\"intent\"]\n                next_request[\"intent\"] = str(response[\"user\"][\"intent\"])\n                # print(next_request)\n                token_response = requests.post(\"https://app.activecollab.com/142587/api/v1/issue-token-intent\", headers=headers,\n                               data=json.dumps(next_request))\n                if token_response.status_code ==200:\n                    token_value=token_response.json()\n                    user_password[\"token\"] = token_value['token']\n                    headers[\"X-Angie-AuthApiToken\"]=token_value['token']\n                    get_project_list = requests.get(\"https://app.activecollab.com/142587/api/v1/projects\",headers=headers)\n                    if get_project_list.status_code ==200:\n                        response_dict={}\n                        get_project_list=get_project_list.json()\n                        for i in get_project_list:\n                            response_dict[str(i['id'])] = i['name']\n                            # print(i['id'])\n                            # print(i['name'])\n                        # print(\"id:\"+str(token_value['token']).split('-')[0])\n                        # time_add_dict[\"user_id\"]=int(str(token_value['token']).split('-')[0])\n                        # print(request.data[''])\n\n                        message=\"Choose Project Id :\"+str(response_dict)\n                        # print(json.dumps(t.substitute(expect=True,message=message,intentname=\"Activecollab - custom\",token=token_value['token'])))\n                        return Response({\n                            \"data\": {\n                                \"google\": {\n                                    \"expectUserResponse\": True,\n                                    \"richResponse\": {\n                                        \"items\": [\n                                            {\n                                                \"simpleResponse\": {\n                                                    \"textToSpeech\": message\n                                                }\n                                            }\n                                        ]\n                                    }\n                                }\n                            },\n                            \"contextOut\": [\n                                {\n                                    \"name\": \"Activecollab-followup\",\n                                    \"lifespanCount\": 1,\n                                    \"parameters\": {\n\n                                        \"token\":str(token_value['token'])\n\n\n                                    }\n                                }\n                            ]\n                        })\n            else:\n                return Response({\"speech\": \"Email or Password Invalid. Try Again\"})\n\n\n\n\n\n\n        # if models.UserDeatils.objects.filter(email=user_password[\"email\"]).exists():\n        #     condition=models.UserDeatils.objects.get(email=user_password[\"email\"])\n        #     serializer = serializers.UserDetailSerializers(condition, data=user_password)\n        #     if serializer.is_valid():\n        #         serializer.save()\n        #         return Response({\"speech\": \"success\"})\n        #     return Response({\"speech\": str(serializer.errors)})\n        #\n        # else:\n        #     serializer = serializers.UserDetailSerializers(data=user_password)\n        #     if serializer.is_valid():\n        #         serializer.save()\n        #         return Response({\"speech\": \"Success\"})\n        #     else:\n        #         return Response({\"speech\": str(serializer.errors)})\n        elif intent_name==\"Activecollab - custom\":\n            # print(request.data['result']['parameters']['token'])\n            # print(headers)\n\n            time_add_dict={\"job_type_id\": 1,\"billable_status\": 0,\"value\": 8}\n\n            
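# The hard-coded defaults above mean every entry is logged as an 8-hour (value=8),\n            # non-billable (billable_status=0) record of job type 1; only user_id, record_date\n            # and the target project vary per request.\n            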
time_add_dict[\"user_id\"]=int(str(request.data['result']['parameters']['token']).split('-')[0])\n time_add_dict[\"record_date\"]=str(request.data['result']['parameters']['date'])\n headers[\"X-Angie-AuthApiToken\"]=str(request.data['result']['parameters']['token'])\n\n # print()\n url=\"https://app.activecollab.com/142587/api/v1/projects/\"+str(request.data['result']['parameters']['id'])+\"/time-records\"\n\n time_add = requests.post(url,headers=headers,data=json.dumps(time_add_dict))\n if time_add.status_code==200:\n return Response({\"speech\":\"success\"})\n else:\n return Response({\"speech\":\"failure\"})\n\n\n\n\n\n\n\n # print(\"post\",request.data['result']['parameters'])\n\n\n return Response({'speech':'respone from post method'})\n # return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n","sub_path":"kseop/GlobalChat/Backend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"649841664","text":"import random\r\nimport math\r\nreal_pi = 3.1415926535897932384626433\r\nin_circle=0\r\nn=0\r\ndp = 20\r\nwhile True:\r\n n+=1\r\n x = random.random()\r\n y = random.random()\r\n if math.sqrt(x*x+y*y) < 1:\r\n in_circle += 1\r\n pi = 4*in_circle/n\r\n d = abs(pi-real_pi)\r\n if dp > d:\r\n print(n, ' : ', pi, ' : ', d)\r\n dp = d\r\n","sub_path":"pi_monte_carlo.py","file_name":"pi_monte_carlo.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"427245024","text":"from app.database import IStoreDatabase, StoreDatabase\nfrom app.factory import DefaultActorFactory, IActorFactory\nfrom app.item import Item, OrdinaryPack\nfrom app.printer import IPrinter, Printer\nfrom app.receipt import ReceiptBuilder\nfrom app.store import IStore, Store\n\nPER_ZREPORT_CUSTOMER_COUNT = 100\nPER_XREPORT_CUSTOMER_COUNT = 20\n\n\nclass StoreSimulator:\n factory: IActorFactory\n _store: IStore\n _printer: IPrinter\n\n def __init__(self) -> None:\n self.factory = DefaultActorFactory()\n manager = self.factory.create_manager()\n cashiers = [self.factory.create_cashier()]\n database = self.__init_database()\n self._store = Store(manager=manager, cashiers=cashiers, database=database)\n self._printer = Printer()\n\n def simulate_shift(self) -> None:\n customers_served = 0\n while True:\n self.__simulate_serve_customer()\n customers_served += 1\n shift_ended = self.__check_report(customers_served)\n if shift_ended:\n break\n\n def __simulate_serve_customer(self) -> None:\n cashier = self._store.get_available_cashier()\n customer = self.factory.create_customer(\n cashier=cashier, store_items=self._store.get_items()\n )\n cashier.open_receipt(receipt_builder=ReceiptBuilder())\n customer_items = customer.show_items()\n for item in customer_items:\n cashier.add_item_to_receipt(item)\n receipt = cashier.give_receipt()\n self._printer.print_receipt(\n receipt=receipt,\n catalogue=self._store.get_catalogue(),\n discounts=self._store.get_discounts(),\n )\n customer.pay_for_items()\n cashier.close_receipt()\n self._store.add_sold_items(receipt)\n\n def __check_report(self, customers_served: int) -> bool:\n if customers_served % PER_ZREPORT_CUSTOMER_COUNT == 0:\n if self._store.get_manager().answer_y_n_question():\n self._store.close_shift()\n return True\n elif customers_served % PER_XREPORT_CUSTOMER_COUNT == 0:\n if self._store.get_manager().answer_y_n_question():\n x_report = 
self._store.get_manager().make_X_report(\n                    self._store.get_sold_items(), self._store.get_revenue()\n                )\n                self._printer.print_X_report(x_report)\n        return False\n\n    def __init_database(self) -> IStoreDatabase:\n        database = StoreDatabase()\n        items = {\n            Item(\"Milk\"): 4.99,\n            Item(\"Mineral Water\"): 3.00,\n            Item(\"Bread\"): 0.80,\n            Item(\"Diapers\"): 1.39,\n            OrdinaryPack(6, \"Beer\"): 1.00,\n            Item(\"Cheese\"): 4.00,\n            OrdinaryPack(10, \"Tissues\"): 1.50,\n        }\n        discounts = {\n            (OrdinaryPack(10, \"Tissues\"),): 0.1,\n            (Item(\"Bread\"), Item(\"Cheese\")): 0.05,\n            (Item(\"Mineral Water\"),): 0.5,\n        }\n        for item in items.items():\n            database.add_item(item)\n\n        for discount in discounts.items():\n            database.add_discount(discount)\n\n        return database\n","sub_path":"Design Patterns/assignment2/Assignment 2/app/store_simulator.py","file_name":"store_simulator.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}\n{"seq_id":"345941260","text":"import pandas as pd\nimport os\n\n\ndirname = os.getcwd()\n\ndirname = dirname.replace('\\\\', '/')\n\ngrades = pd.read_csv(dirname + '/res/final_grade_table.csv')\n\nprint('Generating the ranking of teachers with the most classes...')\n\n# Count the teachers who teach the most classes\nteachers = grades.drop_duplicates('name')['instructor'].value_counts()[:10]\n\nteacher_name = teachers.index.to_list()\n\nteacher_num = teachers.to_list()\n\nteachers = pd.DataFrame([teacher_name, teacher_num]).T\n\nteachers.columns = [['teacher_name', 'num']]\n\n\ndef match_college(row):\n    return grades[grades['instructor'] == str(row[0])]['course_college'].dropna().iloc[0]\n\n\nteachers['college'] = teachers.apply(match_college, axis=1)\n\nteachers.to_excel(dirname + '/res/misc/teacher.xlsx')\n\nprint('Finished generating the ranking of teachers with the most classes')","sub_path":"misc/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}\n{"seq_id":"503018110","text":"from prettytable import PrettyTable\n# GIVE A NAME FOR THE TABLE\n# ie \"TABLE_NAME\"\n\ndef main_program(conn, cur):  # the open DB connection is needed alongside the cursor so commit() can run\n    # give table name here\n    cmd = \"\"\"create table table_name ( Sl_No integer,\n    Brand varchar,\tModel varchar,\tAnnounced varchar,\tAudio_jack varchar,\tBattery varchar,\n    Bluetooth varchar,\tCPU varchar,\tChipset varchar, Colors varchar, Dimensions varchar,Display_type varchar,\n    GPU varchar,\tInternal_memory varchar,\tLoud_speaker varchar,\tMemory_card varchar,\tNetwork varchar,\tOperating_System varchar,\n    Primary_camera varchar,\tRAM varchar,\tRadio varchar,\tSIM varchar,\tSecondary_camera varchar,\tSensors varchar,\n    Status varchar,\tUSB varchar );\"\"\"\n    cur.execute(cmd)\n    conn.commit()\n    cur.execute(\"copy table_name from 'location/of/file/upload1.csv' delimiter ',' csv header\") # give table name here\n    conn.commit()\n    # cur.execute(\"select * from table_name\") # give table name here\n\n    # comment out the above code if you're running the code a second time or the database is already created\n    # now that the data is uploaded we need to run queries on the stored data\n\n    while True:\n        try:\n            print(\"\"\"select the mobile brand you want information for\n            1. Apple\n            2. Asus\n            3. Coolpad\n            4. Google\n            5. Lenovo\n            6. Microsoft\n            7. Motorola\n            8. Nokia\n            9. One plus\n            10. Oppo\n            11. Samsung\n            12. Vivo\n            13. 
Xiaomi\n            \"\"\")\n            choice = int(input())\n\n        except ValueError:\n            print(\"please enter a valid number from the list\")\n            continue\n        if 0 < int(choice) < 14:\n            break\n        else:\n            print(\"please select a number within the range provided\")\n\n    dt = {1: 'Apple', 2: 'Asus', 3: 'Coolpad', 4: 'Google', 5: 'Lenovo', 6: 'Microsoft', 7: \"Motorola\", 8: \"Nokia\", 9: 'One plus', 10: 'Oppo', 11: 'Samsung', 12: 'Vivo', 13: 'Xiaomi'}\n\n    st = f\"\"\"select sl_no, brand, model from table_name where brand = '{dt[choice]}';\"\"\" # give table name here\n    cur.execute(st)\n    rows = cur.fetchall()\n\n    table = PrettyTable(['Serial Number', 'Brand', 'Model'])\n    for row in rows:\n        table.add_row([row[0], row[1], row[2]])\n    print(table)\n\n    numb = input(\"select the corresponding serial_number of the model you want the information of: \\nNote: if you need info for multiple models, enter their numbers separated by commas \\n\")\n    st1 = f\"\"\"select * from table_name where sl_no in ({numb});\"\"\"\n    cur.execute(st1)\n    row = cur.fetchone()\n    table1 = PrettyTable(['Sl_No', 'Brand', 'Model', 'Announced', 'Audio_jack', 'Battery', 'Bluetooth', 'CPU', 'Chipset', 'Colors', 'Dimensions', 'Display_type', 'GPU', 'Internal_memory', 'Loud_speaker', 'Memory_card', 'Network', 'Operating_System', 'Primary_camera', 'RAM', 'Radio', 'SIM', 'Secondary_camera', 'Sensors', 'Status', 'USB'])\n    while row:\n        # edit here\n        table1.add_row([row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10], row[11], row[12], row[13], row[14], row[15], row[16], row[17], row[18], row[19], row[20], row[21], row[22], row[23], row[24], row[25]])\n        row = cur.fetchone()\n\n    print(table1)\n","sub_path":"files/main_prog.py","file_name":"main_prog.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}\n{"seq_id":"283324191","text":"def title_case(title, minor_words = ''):\n    if title == '':\n        return ''\n    title = title.lower()\n    minor_words = minor_words.lower()\n    title_list = title.split(' ')\n    minor_list = minor_words.split(' ')\n    correct_list = []\n    word = title_list[0]\n    if len(word) > 1:\n        word = word[0].upper() + word[1:]\n    else:\n        word = word[0].upper()\n    correct_list.append(word)\n    for i in range(1,len(title_list)):\n        word = title_list[i]\n        if title_list[i] in minor_list:\n            word = title_list[i]\n        elif len(word) > 1:\n            word = word[0].upper() + word[1:]\n        elif len(word) == 1:\n            word = word[0].upper()\n        correct_list.append(word)\n    title = ' '.join(correct_list)\n    return title\n\nprint(title_case('First a of in', 'an often into'))\n# 'First A Of In' but got 'First a Of In'.\n","sub_path":"codewars/python/title_case.py","file_name":"title_case.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}\n{"seq_id":"239200934","text":"# Reference from Online Tutorial on using doc2vec, GitHub repo and Medium \n\nfrom gensim.test.utils import common_texts\nfrom gensim.models.doc2vec import Doc2Vec, TaggedDocument\nfrom gensim.test.utils import get_tmpfile\nimport csv\nfrom nltk.tokenize import word_tokenize\nfrom sklearn.cluster import KMeans\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom sklearn.manifold import TSNE\nfrom sklearn.metrics.cluster import completeness_score\nfrom sklearn.decomposition import PCA\n\nmax_epochs = 10\nvec_size = 100\nalpha = 0.0002\n\ndata1 = []\ndata2 = []\ndata3 = []\nlabels = []\n\nd = {'happy':0, 'sad':1, 'angry':2, 'others':3}\ncolors = 
['blue','green','red','yellow']\n\nwith open('train.txt') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n line_count = 0\n for row in csv_reader:\n if(line_count!=0):\n data1.append(row[1])\n data2.append(row[2])\n data3.append(row[3])\n labels.append(d[row[4]])\n line_count+=1\n\n# Reference from Online Tutorial on using doc2vec, GitHub repo and Medium \n\ntagged_data = [TaggedDocument(words=word_tokenize(_d.lower()), tags=[str(i)]) for i, _d in enumerate(data1)]\nmodel = Doc2Vec(vector_size=vec_size, window=5, min_count=0, workers=4)\nmodel.build_vocab(tagged_data)\n# fname = get_tmpfile(\"my_doc2vec_model\")\n# model.save(fname)\nfor epoch in range(max_epochs):\n print('Epoch '+str(epoch))\n model.train(tagged_data,total_examples=model.corpus_count,epochs=model.iter)\n # decrease the learning rate\n model.alpha -= alpha\n # fix the learning rate, no decay\n model.min_alpha = model.alpha\nmodel.save(\"d2v_1.model\")\nprint(\"Model Saved\")\n\ntagged_data = [TaggedDocument(words=word_tokenize(_d.lower()), tags=[str(i)]) for i, _d in enumerate(data2)]\nmodel = Doc2Vec(vector_size=vec_size, window=5, min_count=0, workers=4)\nmodel.build_vocab(tagged_data)\n# fname = get_tmpfile(\"my_doc2vec_model\")\n# model.save(fname)\nfor epoch in range(max_epochs):\n print('Epoch'+str(epoch))\n model.train(tagged_data,total_examples=model.corpus_count,epochs=model.iter)\n # decrease the learning rate\n model.alpha -= alpha\n # fix the learning rate, no decay\n model.min_alpha = model.alpha\nmodel.save(\"d2v_2.model\")\nprint(\"Model Saved\")\n\ntagged_data = [TaggedDocument(words=word_tokenize(_d.lower()), tags=[str(i)]) for i, _d in enumerate(data3)]\nmodel = Doc2Vec(vector_size=vec_size, window=5, min_count=0, workers=4)\nmodel.build_vocab(tagged_data)\n# fname = get_tmpfile(\"my_doc2vec_model\")\n# model.save(fname)\nfor epoch in range(max_epochs):\n print('Epoch'+str(epoch))\n model.train(tagged_data,total_examples=model.corpus_count,epochs=model.iter)\n # decrease the learning rate\n model.alpha -= alpha\n # fix the learning rate, no decay\n model.min_alpha = model.alpha\nmodel.save(\"d2v_3.model\")\nprint(\"Model Saved\")\n\n# model_test1 = Doc2Vec.load(\"d2v_1.model\")\n# model_test2 = Doc2Vec.load(\"d2v_2.model\")\n# model_test3 = Doc2Vec.load(\"d2v_3.model\")\n\n# test_data = []\n# turn3 = []\n\n# for i in range(len(labels)):\n# d = []\n# test1 = word_tokenize(data1[i].lower())\n# test2 = word_tokenize(data2[i].lower())\n# test3 = word_tokenize(data3[i].lower())\n# x1 = model_test1.infer_vector(test1)\n# x2 = model_test2.infer_vector(test2)\n# x3 = model_test3.infer_vector(test3)\n# test_data.append(np.concatenate((x1,x2,x3)))\n# turn3.append(x3)\n\n# test_data = np.array(test_data)\n# turn3 = np.array(turn3)\n# print(test_data.shape)\n# print(turn3.shape)\n\n# np.save('data_vectors_2', test_data)\n# np.save('turn3', turn3)","sub_path":"src/d2v_2.py","file_name":"d2v_2.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"13136054","text":"import numpy as np\nimport pandas as pd\nimport sklearn as sk\nfrom sklearn import preprocessing,metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import VotingClassifier\n\n###################\n# DATASET\n###################\ndata_df = 
pd.read_csv(\"../student-mat.csv\")\n\n###################\n# DATA PREPARATION\n###################\ndata_df = data_df.drop('Id',axis=1)\ndata_df = data_df.replace(\n ['F','M','U', 'R','LE3','GT3','no','yes','A','T','teacher','health','services','at_home','other','mother','father'],\n [0,1,0,1,0,1,0,1,0,1,1,2,3,4,0,1,2])\n\n\n##################\n# Chosen Descriptors\n###################\n# data_df=data_df[['age','traveltime','failures','freetime', 'goout','health', 'absences','Dalc','Walc']]\n\n##################\n# All Descriptors\n###################\ndata_df=data_df[['sex', 'age', 'address', 'famsize', 'Pstatus', 'Medu', 'Fedu',\n 'Mjob', 'Fjob', 'guardian', 'traveltime', 'studytime',\n 'failures', 'schoolsup', 'famsup', 'paid', 'activities', 'nursery',\n 'higher', 'internet', 'romantic', 'famrel', 'freetime', 'goout', 'Dalc',\n 'Walc', 'health', 'absences', 'G1', 'G2', 'G3']]\n\n##################\n# SEPARATION INPUTS (VARIABLES)/OUTPUTS (LABELS-TARGET)\n###################\ntrain_data_Walc, test_data_Walc = train_test_split(data_df, test_size=0.2, random_state=42)\ntrain_data_Dalc, test_data_Dalc = train_test_split(data_df, test_size=0.2, random_state=42)\n\n# Train\ntrain_data_labels_Dalc = train_data_Dalc[\"Dalc\"].copy()\ntrain_data_Dalc = train_data_Dalc.drop(\"Dalc\", axis=1)\n\ntrain_data_labels_Walc = train_data_Walc[\"Walc\"].copy()\ntrain_data_Walc = train_data_Walc.drop(\"Walc\", axis=1)\n\n# Test\ntest_data_labels_Dalc = test_data_Dalc[\"Dalc\"].copy()\ntest_data_Dalc = test_data_Dalc.drop(\"Dalc\", axis=1)\n\ntest_data_labels_Walc = test_data_Walc[\"Walc\"].copy()\ntest_data_Walc = test_data_Walc.drop(\"Walc\", axis=1)\n\n##################\n# CLASSIFICATION\n###################\n#Bayes\nBayes_Dalc=GaussianNB()\nBayes_Walc=GaussianNB()\n\n#DecisionTree\nTree_Dalc=DecisionTreeClassifier(max_depth=3)\nTree_Walc=DecisionTreeClassifier(max_depth=3)\n\n#Kneighbors\nneighbors_Dalc=sk.neighbors.KNeighborsClassifier(n_neighbors=8)\nneighbors_Walc=sk.neighbors.KNeighborsClassifier(n_neighbors=8)\n\n#Voting\nvoting_rule = 'hard'\nvoting_Dalc = VotingClassifier(estimators=[('kn', neighbors_Dalc),('dt', Tree_Dalc),('gnb',Bayes_Dalc)],voting=voting_rule)\nvoting_Walc = VotingClassifier(estimators=[('kn', neighbors_Walc),('dt', Tree_Walc),('gnb',Bayes_Walc)],voting=voting_rule)\n\nvoting_Dalc.fit(train_data_Dalc,train_data_labels_Dalc)\nvoting_Walc.fit(train_data_Walc,train_data_labels_Walc)\n\npredicted_labels_Dalc = voting_Dalc.predict(test_data_Dalc)\npredicted_labels_Walc = voting_Walc.predict(test_data_Walc)\n\n##################\n# EVALUATION: QUELQUES METRIQUES\nacc=sk.metrics.accuracy_score(test_data_labels_Dalc, predicted_labels_Dalc)\nprint(\"Taux de mauvaise classification Dalc:\\n\",1-acc)\n\nacc=sk.metrics.accuracy_score(test_data_labels_Walc, predicted_labels_Walc)\nprint(\"Taux de mauvaise classification Walc:\\n\",1-acc)","sub_path":"SupervisedClassifier/Voting.py","file_name":"Voting.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"385746396","text":"# Kaggle ML and Data Science Survey 2017\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nplt.style.use('fivethirtyeight')\nimport warnings\nwarnings.filterwarnings('ignore')\nimport base64\nimport io\nfrom scipy.misc import imread\nimport codecs\nimport squarify\n\nresponse = pd.read_csv('multipleChoiceResponses.csv',encoding='ISO-8859-1')\n\nresponse.head()\n\n# Some Basic 
Analysis\nprint('The total no. of respondants:',response.shape[0])\nprint('Total no. of countries with respondents:',response['Country'].nunique())\nprint('Country with highest respondents:',response['Country'].value_counts().values[0],'respondents')\nprint('Youngest respondent:',response['Age'].min(),'and Oldest respondent:',response['Age'].max())\n\n# Gender Split\nplt.subplots(figsize=(22,12))\nsns.countplot(y=response['GenderSelect'],order=response['GenderSelect'].value_counts().index)\nplt.show()\n\n# Respondents by Country\nresp_coun=response['Country'].value_counts()[:15].to_frame()\nsns.barplot(resp_coun['Country'],resp_coun.index,palette='inferno')\nplt.title('Top 15 Countries by number of respondents')\nplt.xlabel('')\nfig=plt.gcf()\nfig.set_size_inches(10,10)\nplt.show()\n\ntree=response['Country'].value_counts().to_frame()\nsquarify.plot(sizes=tree['Country'].values,label=tree.index,color=sns.color_palette('RdYlGn_r',52))\nplt.rcParams.update({'font.size':20})\nfig=plt.gcf()\nfig.set_size_inches(40,15)\nplt.show()\n\n# Compensation\nresponse['CompensationAmount']=response['CompensationAmount'].str.replace(',','')\nresponse['CompensationAmount']=response['CompensationAmount'].str.replace('-','')\nrates=pd.read_csv('conversionRates.csv')\nrates.drop('Unnamed: 0',axis=1,inplace=True)\nsalary=response[['CompensationAmount','CompensationCurrency','GenderSelect','Country','CurrentJobTitleSelect']].dropna()\nsalary=salary.merge(rates,left_on='CompensationCurrency',right_on='originCountry',how='left')\nsalary['Salary']=pd.to_numeric(salary['CompensationAmount'])*salary['exchangeRate']\nprint('Maximum Salary is USD $',salary['Salary'].dropna().astype(int).max())\nprint('Minimum Salary is USD $',salary['Salary'].dropna().astype(int).min())\nprint('Median Salary is USD $',salary['Salary'].dropna().astype(int).median())\n\nplt.subplots(figsize=(15,8))\nsalary = salary[salary['Salary']<1000000]\nsns.distplot(salary['Salary'])\nplt.title('Salary Distribution',size=15)\nplt.show()\n\n# Compensation by Country\nf,ax=plt.subplots(1,2,figsize=(18,8))\nsal_coun=salary.groupby('Country')['Salary'].median().sort_values(ascending=False)[:15].to_frame()\nsns.barplot('Salary',sal_coun.index,data=sal_coun,palette='RdYlGn',ax=ax[0])\nax[0].axvline(salary['Salary'].median(),linestyle='dashed')\nax[0].set_title('Highest Salary Paying Countries')\nax[0].set_xlabel('')\nmax_coun=salary.groupby('Country')['Salary'].median().to_frame()\nmax_coun=max_coun[max_coun.index.isin(resp_coun.index)]\nmax_coun.sort_values(by='Salary',ascending=True).plot.barh(width=0.8,ax=ax[1],color=sns.color_palette('RdYlGn'))\nax[1].axvline(salary['Salary'].median(),linestyle='dashed')\nax[1].set_title('Compensation of Top 15 Respondent Countries')\nax[1].set_xlabel('')\nax[1].set_ylabel('')\nplt.subplots_adjust(wspace=0.8)\nplt.show()\n\n# Salary by Gender\nplt.subplots(figsize=(10,8))\nsns.boxplot(y='GenderSelect',x='Salary',data=salary)\nplt.ylabel('')\nplt.show()\n\n# Age\nplt.subplots(figsize=(15,8))\nresponse['Age'].hist(bins=50,edgecolor='black')\nplt.xticks(list(range(0,80,5)))\nplt.title('Age Distribution')\nplt.show()\n\n# Profession & Major\nf,ax = plt.subplots(1,2,figsize=(25,15))\nsns.countplot(y=response['MajorSelect'],ax=ax[0],order=response['MajorSelect'].value_counts().index)\nax[0].set_title('Major')\nax[0].set_ylabel('')\nsns.countplot(y=response['CurrentJobTitleSelect'],ax=ax[1],order=response['CurrentJobTitleSelect'].value_counts().index)\nax[1].set_title('Current 
Job')\nax[1].set_ylabel('')\nplt.subplots_adjust(wspace=0.8)\nplt.show()\n\n# Compensation by Job Title\nsal_job = salary.groupby('CurrentJobTitleSelect')['Salary'].median().to_frame().sort_values(by='Salary',ascending=False)\nax=sns.barplot(sal_job.Salary,sal_job.index,palette=sns.color_palette('inferno',20))\nplt.title('Compensation By Job Title',size=15)\nfor i, v in enumerate(sal_job.Salary): \n    ax.text(.5, i, v,fontsize=10,color='white',weight='bold')\nfig=plt.gcf()\nfig.set_size_inches(8,8)\nplt.show()\n\n# Machine Learning\nf,ax = plt.subplots(1,2,figsize=(25,12))\nskills = response['MLSkillsSelect'].str.split(',')\nskills_set=[]\nfor i in skills.dropna():\n    skills_set.extend(i)\nplt1 = pd.Series(skills_set).value_counts().sort_values(ascending=False).to_frame()\nsns.barplot(plt1[0],plt1.index,ax=ax[0],palette=sns.color_palette('inferno_r',15))\nax[0].set_title('ML Skills')\n\ntech=response['MLTechniquesSelect'].str.split(',')\ntechniques=[]\nfor i in tech.dropna():\n    techniques.extend(i)\nplt1=pd.Series(techniques).value_counts().sort_values(ascending=False).to_frame()\nsns.barplot(plt1[0],plt1.index,ax=ax[1],palette=sns.color_palette('inferno_r',15))\nax[1].set_title('ML Techniques used')\nplt.subplots_adjust(wspace=0.8)\nplt.show()\n\nf,ax=plt.subplots(1,2,figsize=(25,12))\nml_nxt=response['MLMethodNextYearSelect'].str.split(',')\nnxt_year=[]\nfor i in ml_nxt.dropna():\n    nxt_year.extend(i)\npd.Series(nxt_year).value_counts()[:15].sort_values(ascending=True).plot.barh(width=0.9,color=sns.color_palette('winter_r',15),ax=ax[0])\ntool=response['MLToolNextYearSelect'].str.split(',')\ntool_nxt=[]\nfor i in tool.dropna():\n    tool_nxt.extend(i)\npd.Series(tool_nxt).value_counts()[:15].sort_values(ascending=True).plot.barh(width=0.9,color=sns.color_palette('winter_r',15),ax=ax[1])\nplt.subplots_adjust(wspace=0.8)\nax[0].set_title('ML Method Next Year')\nax[1].set_title('ML Tool Next Year')\nplt.show()\n\n# Best Platforms to Learn\nplt.subplots(figsize=(6,8))\nlearn=response['LearningPlatformSelect'].str.split(',')\nplatform=[]\nfor i in learn.dropna():\n    platform.extend(i)\npd.Series(platform).value_counts()[:15].sort_values(ascending=True).plot.barh(width=0.9,color=sns.color_palette('winter',15))\nplt.title('Best Platforms to Learn',size=15)\nplt.show()\n\n# Hardware Used\nplt.subplots(figsize=(10,10))\nhard=response['HardwarePersonalProjectsSelect'].str.split(',')\nhardware=[]\nfor i in hard.dropna():\n    hardware.extend(i)\npd.Series(hardware).value_counts().sort_values(ascending=True).plot.barh(width=0.9,color=sns.color_palette('inferno',10))\nplt.title('Machines Used')\nplt.show()\n\n# Where do I get Datasets from ??\nplt.subplots(figsize=(15,15))\ndata = response['PublicDatasetsSelect'].str.split(',')\ndataset = []\nfor i in data.dropna():\n    dataset.extend(i)\npd.Series(dataset).value_counts().plot.pie(autopct='%1.1f%%',colors=sns.color_palette('Paired',10),startangle=90,wedgeprops={'linewidth':2,'edgecolor':'white'})\nplt.title('Dataset Source')\nmy_circle = plt.Circle((0,0),0.7,color='white')\np = plt.gcf()\np.gca().add_artist(my_circle)\nplt.ylabel('')\nplt.show()\n\n# Code Sharing\nplt.subplots(figsize=(15,15))\ncode=response['WorkCodeSharing'].str.split(',')\ncode_share=[]\nfor i in code.dropna():\n    code_share.extend(i)\npd.Series(code_share).value_counts().plot.pie(autopct='%1.1f%%',shadow=True,colors=sns.color_palette('Set3',10),startangle=90,wedgeprops = { 'linewidth' : 2, 'edgecolor' : 'white' })\nplt.title('Code Sharing Medium')\nmy_circle=plt.Circle( (0,0), 0.65, color='white')\np=plt.gcf()\np.gca().add_artist(my_circle)\nplt.ylabel('')\nplt.show()\n\n# Challenges in Data Science\nplt.subplots(figsize=(15,18))\nchallenge = response['WorkChallengesSelect'].str.split(',')\nchallenges=[]\nfor i in challenge.dropna():\n    challenges.extend(i)\nplt1 = pd.Series(challenges).value_counts().sort_values(ascending=False).to_frame()\nsns.barplot(plt1[0],plt1.index,palette=sns.color_palette('inferno',25))\nplt.title('Challenges in Data Science')\nplt.show()\n\n# Job Satisfaction\nsatisfy = response.copy()\nsatisfy['JobSatisfaction'].replace({'10 - Highly Satisfied':'10','1 - Highly Dissatisfied':'1','I prefer not to share':np.NaN},inplace=True)\nsatisfy.dropna(subset=['JobSatisfaction'],inplace=True)\nsatisfy['JobSatisfaction'] = satisfy['JobSatisfaction'].astype(int)\nsatisfy_job = satisfy.groupby(['CurrentJobTitleSelect'])['JobSatisfaction'].mean().sort_values(ascending=False).to_frame()\nax=sns.barplot(y=satisfy_job.index,x=satisfy_job.JobSatisfaction,palette=sns.color_palette('inferno',20))\nfig=plt.gcf()\nfig.set_size_inches(8,10)\nfor i,v in enumerate(satisfy_job.JobSatisfaction):\n    ax.text(.1,i,v,fontsize=10,color='white',weight='bold')\nplt.title('Job Satisfaction out of 10')\nplt.show()\n\n# Python Vs R\nresp=response.dropna(subset=['WorkToolsSelect'])\nresp=resp.merge(rates,left_on='CompensationCurrency',right_on='originCountry',how='left')\npython=resp[(resp['WorkToolsSelect'].str.contains('Python'))&(~resp['WorkToolsSelect'].str.contains('R'))]\nR=resp[(~resp['WorkToolsSelect'].str.contains('Python'))&(resp['WorkToolsSelect'].str.contains('R'))]\nboth=resp[(resp['WorkToolsSelect'].str.contains('Python'))&(resp['WorkToolsSelect'].str.contains('R'))]\n\n# Recommended Language for Beginners\nresponse['LanguageRecommendationSelect'].value_counts()[:2].plot.bar()\nplt.show()\n\n# Useful or Not ??\nf,ax=plt.subplots(1,2,figsize=(18,8))\nresponse['JobSkillImportancePython'].value_counts().plot.pie(ax=ax[0],autopct='%1.1f%%',explode=[0.1,0,0],shadow=True,colors=['g','lightblue','r'])\nax[0].set_title('Python Necessity')\nax[0].set_ylabel('')\nresponse['JobSkillImportanceR'].value_counts().plot.pie(ax=ax[1],autopct='%1.1f%%',explode=[0,0.1,0],shadow=True,colors=['lightblue','g','r'])\nax[1].set_title('R Necessity')\nax[1].set_ylabel('')\nplt.show()\n\n# Number Of Users By Language\nf,ax=plt.subplots(1,2,figsize=(18,8))\npd.Series([python.shape[0],R.shape[0],both.shape[0]],index=['Python','R','Both']).plot.bar(ax=ax[0])\nax[0].set_title('Number of Users')\nvenn2(subsets = (python.shape[0],R.shape[0],both.shape[0]), set_labels = ('Python Users', 'R Users'))\nplt.title('Venn Diagram for Users')\nplt.show()\n\n\n# 
Compensation\npy_sal=(pd.to_numeric(python['CompensationAmount'].dropna())*python['exchangeRate']).dropna()\npy_sal=py_sal[py_sal<1000000]\nR_sal=(pd.to_numeric(R['CompensationAmount'].dropna())*R['exchangeRate']).dropna()\nR_sal=R_sal[R_sal<1000000]\nboth_sal=(pd.to_numeric(both['CompensationAmount'].dropna())*both['exchangeRate']).dropna()\nboth_sal=both_sal[both_sal<1000000]\ntrying=pd.DataFrame([py_sal,R_sal,both_sal])\ntrying=trying.transpose()\ntrying.columns=['Python','R','Both']\nprint('Median Salary For Individual using Python:',trying['Python'].median())\nprint('Median Salary For Individual using R:',trying['R'].median())\nprint('Median Salary For Individual knowing both languages:',trying['Both'].median())\n\ntrying.plot.box()\nplt.title('Compensation By Language')\nfig=plt.gcf()\nfig.set_size_inches(10,6)\nplt.show()\n\n# Language Used By Professionals\npy1 = python.copy()\nr= R.copy()\npy1['WorkToolsSelect'] = 'Python'\nr['WorkToolsSelect'] = 'R'\nr_vs_py = pd.concat([py1,r])\nr_vs_py = r_vs_py.groupby(['CurrentJobTitleSelect','WorkToolsSelect'])['Age'].count().to_frame().reset_index()\nr_vs_py.pivot('CurrentJobTitleSelect','WorkToolsSelect','Age').plot.barh(width=0.8)\nfig = plt.gcf()\nfig.set_size_inches(10,10)\nplt.title('Job Title Vs Language Used',size=15)\nplt.show()\n\n# Job Function Vs Language\nr_vs_py = pd.concat([py1,r])\nr_vs_py = r_vs_py.groupby(['JobFunctionSelect','WorkToolsSelect'])['Age'].count().to_frame().reset_index()\nr_vs_py.pivot('JobFunctionSelect','WorkToolsSelect','Age').plot.barh(width=0.8)\nfig = plt.gcf()\nfig.set_size_inches(10,10)\nplt.title('Job Description Vs Language Used')\nplt.show()\n\n# Tenure Vs Language Used\nr_vs_py = pd.concat([py1,r])\nr_vs_py = r_vs_py.groupby(['Tenure','WorkToolsSelect'])['Age'].count().to_frame().reset_index()\nr_vs_py.pivot('Tenure','WorkToolsSelect','Age').plot.barh(width=0.8)\nfig = plt.gcf()\nfig.set_size_inches(10,10)\nplt.title('Job Tenure Vs Language Used')\nplt.show()\n\n# Industry Vs Language Used\nr_vs_py = pd.concat([py1,r])\nr_vs_py = r_vs_py.groupby(['EmployerIndustry','WorkToolsSelect'])['Age'].count().to_frame().reset_index()\nr_vs_py = r_vs_py.pivot('EmployerIndustry','WorkToolsSelect','Age').plot.barh(width=0.8)\nfig = plt.gcf()\nfig.set_size_inches(10,10)\nplt.title('Industry Vs Language Used')\nplt.show()\n\n# Common Tools with Python and R\nf,ax = plt.subplots(1,2,figsize=(20,15))\npy_comp = python['WorkToolsSelect'].str.split(',')\npy_comp1 = []\nfor i in py_comp:\n    py_comp1.extend(i)\nplt1 = pd.Series(py_comp1).value_counts()[1:15].sort_values(ascending=False).to_frame()\nsns.barplot(plt1[0],plt1.index,ax=ax[0],palette=sns.color_palette('inferno_r',15))\nR_comp = R['WorkToolsSelect'].str.split(',')\nR_comp1 = []\nfor i in R_comp:\n    R_comp1.extend(i)\nplt1 = pd.Series(R_comp1).value_counts()[1:15].sort_values(ascending=False).to_frame()\nsns.barplot(plt1[0],plt1.index,ax=ax[1],palette=sns.color_palette('inferno_r',15))\nax[0].set_title('Commonly Used Tools with Python')\nax[1].set_title('Commonly Used Tools with R')\nplt.subplots_adjust(wspace=0.8)\nplt.show()\n\n# Asking Data Scientists\nresponse['DataScienceIdentitySelect'].value_counts()\n\n# Current Job Titles\nplt.subplots(figsize=(10,8))\nscientist = response[response['DataScienceIdentitySelect'] == 'Yes']\nscientist['CurrentJobTitleSelect'].value_counts().sort_values(ascending=True).plot.barh(width=0.9,color=sns.color_palette('inferno_r',15))\nplt.title('Job Titles',size=15)\nplt.show()\n\ntrue = 
response[response['CurrentJobTitleSelect'] == 'Data Scientist']\n\nscientist = pd.concat([scientist,true])\nscientist['CurrentJobTitleSelect'].shape[0]\n\n# Country Wise Split\nplt.subplots(figsize=(10,8))\ncoun = scientist['Country'].value_counts()[:15].sort_values(ascending=False).to_frame()\nsns.barplot(coun.Country,coun.index,palette='inferno')\nplt.title('Countries by No. of Data Scientists',size=15)\nplt.show()\n\n# Employment Status and Education\nf,ax = plt.subplots(1,2,figsize=(25,10))\nsns.countplot(y=scientist['EmploymentStatus'],ax=ax[0])\nax[0].set_title('Employment Status')\nax[0].set_ylabel('')\nsns.countplot(y=scientist['FormalEducation'],order=scientist['FormalEducation'].value_counts().index,ax=ax[1],palette=sns.color_palette('viridis_r',15))\nax[1].set_title('Formal Education')\nax[1].set_ylabel('')\nplt.subplots_adjust(wspace=0.6)\nplt.show()\n\n# Compensation by Formal Education\nplt.subplots(figsize=(25,12))\ncomp_edu = scientist.merge(salary,left_index=True,right_index=True,how='left')\ncomp_edu=comp_edu[['FormalEducation','Salary']]\nsns.boxplot(x='FormalEducation',y='Salary',data=comp_edu)\nplt.title('Compensation vs Education')\nplt.xticks(rotation=90)\nplt.show()\n\n# Previous Job and Salary Change\nf,ax=plt.subplots(1,2,figsize=(30,15))\npast=scientist['PastJobTitlesSelect'].str.split(',')\npast_job=[]\nfor i in past.dropna():\n past_job.extend(i)\npd.Series(past_job).value_counts().sort_values(ascending=True).plot.barh(width=0.9,color=sns.color_palette('summer',25),ax=ax[0])\nax[0].set_title('Previous Job')\nsal=scientist['SalaryChange'].str.split(',')\nsal_change=[]\nfor i in sal.dropna():\n sal_change.extend(i)\npd.Series(sal_change).value_counts().sort_values(ascending=True).plot.barh(width=0.9,color=sns.color_palette('summer',10),ax=ax[1])\nax[1].set_title('Salary Change')\nplt.subplots_adjust(wspace=0.9)\nplt.show()\n\n# Tools Used at work\nplt.subplots(figsize=(8,8))\ntools=scientist['WorkToolsSelect'].str.split(',')\ntools_work=[]\nfor i in tools.dropna():\n tools_work.extend(i)\npd.Series(tools_work).value_counts()[:15].sort_values(ascending=True).plot.barh(width=0.9,color=sns.color_palette('RdYlGn',15))\nplt.show()\n\n# Checking the Free Responses\nfrom wordcloud import WordCloud,STOPWORDS\nimport nltk\nfrom nltk.corpus import stopwords\nfree = pd.read_csv('freeformResponses.csv')\nstop_words = set(stopwords.words('english'))\nstop_words.update(',',';','!','?','.','(',')','$','#','+',':','...')\n","sub_path":"kag_survey_2017.py","file_name":"kag_survey_2017.py","file_ext":"py","file_size_in_byte":16272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"556562104","text":"from collections import defaultdict\n\n\nclass Solution:\n def sumOfDistancesInTree(self, n: int, edges):\n v = defaultdict(lambda: set())\n for s, t in edges:\n v[s].add(t)\n v[t].add(s)\n depths = [0] * n\n subtree = [0] * n\n ret = [0] * n\n\n def traversal(node, parent, depth):\n r = 1\n depths[node] = depth\n for i in v[node]:\n if i != parent:\n r += traversal(i, node, depth + 1)\n subtree[node] = r\n return r\n\n def calculate_distance(node, parent, distance):\n ret[node] = distance\n for i in v[node]:\n if i != parent:\n calculate_distance(i, node, distance + (n - subtree[i]) - subtree[i])\n\n traversal(0, -1, 0)\n calculate_distance(0, -1, sum(depths))\n return 
ret\n","sub_path":"src/leetcode/P3963.py","file_name":"P3963.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"418975018","text":"from matrix_utils import print_m\n\ntext = open('22.txt').readlines()\n\ngrid = []\nfor line in text:\n\trow = []\n\tfor c in line.strip():\n\t\trow += [1 if c == '#' else 0]\n\tgrid += [row]\n\n'''\ngrid = [[0 for _ in range(9)] for _ in range(9)]\ngrid[4][3] = 1\ngrid[3][5] = 1\n#'''\n\nprint_m(grid)\n\nrow = int(len(grid)/2)\ncol = int(len(grid)/2)\nprint(\"starting at \" + str(row) + \" \" + str(col))\niterations = 10000000\ndirections = [(-1, 0), (0, 1), (1, 0), (0, -1)]\ncurr_dir = 0\ninfections = 0\nexpansions = 0\n\nfor i in range(iterations):\n\tif grid[row][col] == 1: # infected\n\t\tcurr_dir += 1 # turn right\n\t\tgrid[row][col] = 7 # become flagged\n\telif grid[row][col] == 5: # weakened\n\t\tcurr_dir += 0 # dont turn\n\t\tgrid[row][col] = 1 # become infected\n\t\tinfections += 1\n\telif grid[row][col] == 7: # flagged\n\t\tcurr_dir += 2 # turn around\n\t\tgrid[row][col] = 0 # become clean\n\telse: # clean\n\t\tcurr_dir -= 1 # turn left\n\t\tgrid[row][col] = 5 # become weakened\n\t\n\tcurr_dir %= len(directions)\n\tdelta = directions[curr_dir]\n\trow, col = row + delta[0], col + delta[1]\n\t\n\t# expand grid if necessary\n\tif row < 0:\n\t\tgrid.insert(0, [0 for _ in range(len(grid[0]))])\n\t\trow += 1\n\t\texpansions[0] += 1\n\tif row >= len(grid):\n\t\tgrid += [[0 for _ in range(len(grid[0]))]]\n\t\texpansions[1] += 1\n\tif col < 0:\n\t\tfor r in grid:\n\t\t\tr.insert(0, 0)\n\t\tcol += 1\n\t\texpansions[2] += 1\n\tif col >= len(grid[row]):\n\t\tfor r in grid:\n\t\t\tr += [0]\n\t\texpansions[3] += 1\n\t\t\t\n\tif i % int(iterations/100) == 0:\n\t\tprint(\"done with iteration \" + str(i) + \", expanded \" + str(expansions) + \" times\") \n\t\texpansions = [0, 0, 0, 0]\n\t\t\n\nprint(\"after \" + str(iterations) + \" iterations: \\n\")\n#print_m(grid)\nprint(str(infections) + \" infections occurred\")\n","sub_path":"22.py","file_name":"22.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"198892743","text":"'''\n\tAuthor: Kiana Hosaka\n\tDate Last Modified: February 19, 2020\n\tDescription: Test file for Weekday Schedules\n'''\n\n# Weekday: Sunday Night, Monday, Tuesday, Wedesday, Thursday\n'''\nprimary_1 = [\"Ally\", \"Maddie\", \"Alia\", \"Josiah\", \"Fran\"]\nsecondary_1 = [\"Maddie\", \"Alia\", \"Ally\", \"Sam\", \"Jacob\"]\nprimary_2 = [\"Ally\", \"Maddie\", \"Alia\", \"Josiah\", \"Fran\"]\nsecondary_2 = [\"Maddie\", \"Alia\", \"Ally\", \"Sam\", \"Jacob\"]\nprimary_3 = [\"Ally\", \"Maddie\", \"Alia\", \"Josiah\", \"Fran\"]\nsecondary_3 = [\"Maddie\", \"Alia\", \"Ally\", \"Sam\", \"Jacob\"]\nprimary_4 = [\"Ally\", \"Maddie\", \"Alia\", \"Josiah\", \"Fran\"]\nsecondary_4 = [\"Maddie\", \"Alia\", \"Ally\", \"Sam\", \"Jacob\"]\nprimary_5 = [\"Ally\", \"Maddie\", \"Alia\", \"Josiah\", \"Fran\"]\nsecondary_5 = [\"Maddie\", \"Alia\", \"Ally\", \"Sam\", \"Jacob\"]\nprimary_6 = [\"Ally\", \"Maddie\", \"Alia\", \"Josiah\", \"Fran\"]\nsecondary_6 = [\"Maddie\", \"Alia\", \"Ally\", \"Sam\", \"Jacob\"]\nprimary_7 = [\"Ally\", \"Maddie\", \"Alia\", \"Josiah\", \"Fran\"]\nsecondary_7 = [\"Maddie\", \"Alia\", \"Ally\", \"Sam\", \"Jacob\"]\nprimary_8 = [\"Ally\", \"Maddie\", \"Alia\", \"Josiah\", \"Fran\"]\nsecondary_8 = [\"Maddie\", \"Alia\", \"Ally\", \"Sam\", 
\"Jacob\"]\nprimary_9 = [\"Ally\", \"Maddie\", \"Alia\", \"Josiah\", \"Fran\"]\nsecondary_9 = [\"Maddie\", \"Alia\", \"Ally\", \"Sam\", \"Jacob\"]\nprimary_10 = [\"Ally\", \"Maddie\", \"Alia\", \"Josiah\", \"Fran\"]\nsecondary_10 = [\"Maddie\", \"Alia\", \"Ally\", \"Sam\", \"Jacob\"]\n'''\n\nprimary_1 = [\"P1 Sun Night\", \"P1 Monday\", \"P1 Tuesday\", \"P1 Wednesday\", \"P1 Thursday\"]\nsecondary_1 = [\"S1 Sun Night\", \"S1 Monday\", \"S1 Tuesday\", \"S1 Wednesday\", \"S1 Thursday\"]\nprimary_2 = [\"P2 Sun Night\", \"P2 Monday\", \"P2 Tuesday\", \"P2 Wednesday\", \"P2 Thursday\"]\nsecondary_2 = [\"S2 Sun Night\", \"S2 Monday\", \"S2 Tuesday\", \"S2 Wednesday\", \"S2 Thursday\"]\nprimary_3 = [\"P3 Sun Night\", \"P3 Monday\", \"P3 Tuesday\", \"P3 Wednesday\", \"P3 Thursday\"]\nsecondary_3 = [\"S3 Sun Night\", \"S3 Monday\", \"S3 Tuesday\", \"S3 Wednesday\", \"S3 Thursday\"]\nprimary_4 = [\"P4 Sun Night\", \"P4 Monday\", \"P4 Tuesday\", \"P4 Wednesday\", \"P4 Thursday\"]\nsecondary_4 = [\"S4 Sun Night\", \"S4 Monday\", \"S4 Tuesday\", \"S4 Wednesday\", \"S4 Thursday\"]\nprimary_5 = [\"P5 Sun Night\", \"P5 Monday\", \"P5 Tuesday\", \"P5 Wednesday\", \"P5 Thursday\"]\nsecondary_5 = [\"S5 Sun Night\", \"S5 Monday\", \"S5 Tuesday\", \"S5 Wednesday\", \"S5 Thursday\"]\nprimary_6 = [\"P6 Sun Night\", \"P6 Monday\", \"P6 Tuesday\", \"P6 Wednesday\", \"P6 Thursday\"]\nsecondary_6 = [\"S6 Sun Night\", \"S6 Monday\", \"S6 Tuesday\", \"S6 Wednesday\", \"S6 Thursday\"]\nprimary_7 = [\"P7 Sun Night\", \"P7 Monday\", \"P7 Tuesday\", \"P7 Wednesday\", \"P7 Thursday\"]\nsecondary_7 = [\"S7 Sun Night\", \"S7 Monday\", \"S7 Tuesday\", \"S7 Wednesday\", \"S7 Thursday\"]\nprimary_8 = [\"P8 Sun Night\", \"P8 Monday\", \"P8 Tuesday\", \"P8 Wednesday\", \"P8 Thursday\"]\nsecondary_8 = [\"S8 Sun Night\", \"S8 Monday\", \"S8 Tuesday\", \"S8 Wednesday\", \"S8 Thursday\"]\nprimary_9 = [\"P9 Sun Night\", \"P9 Monday\", \"P9 Tuesday\", \"P9 Wednesday\", \"P9 Thursday\"]\nsecondary_9 = [\"S9 Sun Night\", \"S9 Monday\", \"S9 Tuesday\", \"S9 Wednesday\", \"S9 Thursday\"]\nprimary_10 = [\"P10 Sun Night\", \"P10 Monday\", \"P10 Tuesday\", \"P10 Wednesday\", \"P10 Thursday\"]\nsecondary_10 = [\"S10 Sun Night\", \"S10 Monday\", \"S10 Tuesday\", \"S10 Wednesday\", \"S10 Thursday\"]\n\nschedule = [[primary_1, secondary_1], [primary_2, secondary_2], [primary_3, secondary_3], \\\n\t\t[primary_4, secondary_4], [primary_5, secondary_5], [primary_6, secondary_6], \\\n\t\t[primary_7, secondary_7], [primary_8, secondary_8], [primary_9, secondary_9], \\\n\t\t[primary_10, secondary_10]]\n","sub_path":"test_week 2.py","file_name":"test_week 2.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"392154428","text":"import numpy as np\nimport pandas as pd\n\nfrom glob import glob\nfrom pathlib import Path\nfrom skimage.measure import regionprops_table\nfrom vedo import mesh, Spheres, Spline\n\nfrom imlib.pandas.misc import initialise_df\nfrom imlib.source.source_files import source_custom_config_amap\nfrom imlib.general.list import unique_elements_lists\nfrom imlib.general.system import ensure_directory_exists\n\nfrom neuro.generic_neuro_tools import save_brain\nfrom neuro.visualise.brainrender_tools import (\n volume_to_vector_array_to_obj_file,\n load_regions_into_brainrender,\n)\nfrom neuro.atlas_tools.array import lateralise_atlas\nfrom neuro.atlas_tools.misc import get_voxel_volume, get_atlas_pixel_sizes\nfrom 
neuro.structures.structures_tree import (\n    atlas_value_to_name,\n    UnknownAtlasValue,\n)\nfrom neuro.visualise.napari_tools.layers import (\n    prepare_load_nii,\n    add_new_label_layer,\n)\n\n\ndef summarise_brain_regions(label_layers, filename):\n    summaries = []\n    for label_layer in label_layers:\n        summaries.append(summarise_single_brain_region(label_layer))\n\n    result = pd.concat(summaries)\n\n    volume_header = \"volume_mm3\"\n    length_columns = [\n        \"x_min_um\",\n        \"y_min_um\",\n        \"z_min_um\",\n        \"x_max_um\",\n        \"y_max_um\",\n        \"z_max_um\",\n        \"x_center_um\",\n        \"y_center_um\",\n        \"z_center_um\",\n    ]\n\n    result.columns = [\"region\"] + [volume_header] + length_columns\n\n    atlas_pixel_sizes = get_atlas_pixel_sizes(source_custom_config_amap())\n    voxel_volume = get_voxel_volume(source_custom_config_amap()) / (1000 ** 3)\n\n    result[volume_header] = result[volume_header] * voxel_volume\n\n    for header in length_columns:\n        for dim in atlas_pixel_sizes.keys():\n            if header.startswith(dim):\n                scale = float(atlas_pixel_sizes[dim])\n                assert scale > 0\n\n        result[header] = result[header] * scale\n\n    result.to_csv(filename, index=False)\n\n\ndef summarise_single_brain_region(\n    label_layer,\n    ignore_empty=True,\n    properties_to_fetch=[\"area\", \"bbox\", \"centroid\",],\n):\n    data = label_layer.data\n    if ignore_empty:\n        if data.sum() == 0:\n            return\n\n    # swap data back to original orientation from napari orientation\n    data = np.swapaxes(data, 2, 0)\n\n    regions_table = regionprops_table(data, properties=properties_to_fetch)\n    df = pd.DataFrame.from_dict(regions_table)\n    df.insert(0, \"Region\", label_layer.name)\n    return df\n\n\ndef add_existing_track_layers(\n    viewer, track_file, point_size, x_scaling, y_scaling, z_scaling\n):\n    max_z = len(viewer.layers[0].data)\n    data = brainrender_track_to_napari(\n        track_file, x_scaling, y_scaling, z_scaling, max_z\n    )\n    new_points_layer = viewer.add_points(\n        data, n_dimensional=True, size=point_size, name=Path(track_file).stem,\n    )\n    new_points_layer.mode = \"ADD\"\n    return new_points_layer\n\n\ndef brainrender_track_to_napari(\n    track_file, x_scaling, y_scaling, z_scaling, max_z\n):\n    points = pd.read_hdf(track_file)\n    points[\"x\"] = points[\"x\"] / z_scaling\n    points[\"z\"] = points[\"z\"] / x_scaling\n    points[\"y\"] = points[\"y\"] / y_scaling\n\n    points[\"x\"] = max_z - points[\"x\"]\n\n    return points.to_numpy().astype(np.int16)\n\n\ndef add_existing_label_layers(\n    viewer,\n    label_file,\n    selected_label=1,\n    num_colors=10,\n    brush_size=30,\n    memory=False,\n):\n    \"\"\"\n    Loads an existing (nii) image as a napari labels layer\n    :param viewer: Napari viewer instance\n    :param label_file: Filename of the image to be loaded\n    :param int selected_label: Label ID to be preselected\n    :param int num_colors: How many colors (labels)\n    :param int brush_size: Default size of the label brush\n    :return label_layer: napari labels layer\n    \"\"\"\n    label_file = Path(label_file)\n    labels = prepare_load_nii(label_file, memory=memory)\n    label_layer = viewer.add_labels(\n        labels, num_colors=num_colors, name=label_file.stem\n    )\n    label_layer.selected_label = selected_label\n    label_layer.brush_size = brush_size\n    return label_layer\n\n\ndef save_regions_to_file(\n    label_layer,\n    destination_directory,\n    template_image,\n    ignore_empty=True,\n    obj_ext=\".obj\",\n    image_extension=\".nii\",\n):\n    \"\"\"\n    Analyses the regions (to see what brain areas they are in) and saves\n    the segmented regions to file (both as .obj and .nii)\n    :param label_layer: napari labels layer (with segmented 
regions)\n    :param destination_directory: Where to save files to\n    :param template_image: Existing image of size/shape of the\n    destination images\n    :param ignore_empty: If True, don't attempt to save empty images\n    :param obj_ext: File extension for the obj files\n    :param image_extension: File extension for the image files\n    \"\"\"\n    data = label_layer.data\n    if ignore_empty:\n        if data.sum() == 0:\n            return\n\n    # swap data back to original orientation from napari orientation\n    data = np.swapaxes(data, 2, 0)\n    name = label_layer.name\n\n    filename = destination_directory / (name + obj_ext)\n    volume_to_vector_array_to_obj_file(\n        data, filename,\n    )\n\n    filename = destination_directory / (name + image_extension)\n    save_brain(\n        data, template_image, filename,\n    )\n\n\ndef analyse_region_brain_areas(\n    label_layer,\n    destination_directory,\n    annotations,\n    hemispheres,\n    structures_reference_df,\n    extension=\".csv\",\n    ignore_empty=True,\n):\n    \"\"\"\n\n    :param label_layer: napari labels layer (with segmented regions)\n    :param np.array annotations: numpy array of the brain area annotations\n    :param np.array hemispheres: numpy array of hemisphere annotations\n    :param structures_reference_df: Pandas dataframe with \"id\" column (matching\n    the values in \"annotations\") and a \"name\" column\n    :param ignore_empty: If True, don't analyse empty regions\n    \"\"\"\n\n    data = label_layer.data\n    if ignore_empty:\n        if data.sum() == 0:\n            return\n\n    # swap data back to original orientation from napari orientation\n    data = np.swapaxes(data, 2, 0)\n    name = label_layer.name\n\n    masked_annotations = data.astype(bool) * annotations\n\n    # TODO: don't hardcode hemisphere value. Get from atlas config\n    annotations_left, annotations_right = lateralise_atlas(\n        masked_annotations,\n        hemispheres,\n        left_hemisphere_value=2,\n        right_hemisphere_value=1,\n    )\n\n    unique_vals_left, counts_left = np.unique(\n        annotations_left, return_counts=True\n    )\n    unique_vals_right, counts_right = np.unique(\n        annotations_right, return_counts=True\n    )\n\n    voxel_volume = get_voxel_volume(source_custom_config_amap())\n    voxel_volume_in_mm = voxel_volume / (1000 ** 3)\n\n    df = initialise_df(\n        \"structure_name\",\n        \"left_volume_mm3\",\n        \"left_percentage_of_total\",\n        \"right_volume_mm3\",\n        \"right_percentage_of_total\",\n        \"total_volume_mm3\",\n        \"percentage_of_total\",\n    )\n\n    sampled_structures = unique_elements_lists(\n        list(unique_vals_left) + list(unique_vals_right)\n    )\n    total_volume_region = get_total_volume_regions(\n        unique_vals_left, unique_vals_right, counts_left, counts_right\n    )\n\n    for atlas_value in sampled_structures:\n        if atlas_value != 0:\n            try:\n                df = add_structure_volume_to_df(\n                    df,\n                    atlas_value,\n                    structures_reference_df,\n                    unique_vals_left,\n                    unique_vals_right,\n                    counts_left,\n                    counts_right,\n                    voxel_volume_in_mm,\n                    total_volume_voxels=total_volume_region,\n                )\n\n            except UnknownAtlasValue:\n                print(\n                    \"Value: {} is not in the atlas structure reference file. 
\"\n \"Not calculating the volume\".format(atlas_value)\n )\n filename = destination_directory / (name + extension)\n df.to_csv(filename, index=False)\n\n\ndef get_total_volume_regions(\n unique_vals_left, unique_vals_right, counts_left, counts_right,\n):\n zero_index_left = np.where(unique_vals_left == 0)[0][0]\n counts_left = list(counts_left)\n counts_left.pop(zero_index_left)\n\n zero_index_right = np.where(unique_vals_right == 0)[0][0]\n counts_right = list(counts_right)\n counts_right.pop(zero_index_right)\n\n return sum(counts_left + counts_right)\n\n\ndef add_structure_volume_to_df(\n df,\n atlas_value,\n structures_reference_df,\n unique_vals_left,\n unique_vals_right,\n counts_left,\n counts_right,\n voxel_volume,\n total_volume_voxels=None,\n):\n name = atlas_value_to_name(atlas_value, structures_reference_df)\n\n left_volume, left_percentage = get_volume_in_hemisphere(\n atlas_value,\n unique_vals_left,\n counts_left,\n total_volume_voxels,\n voxel_volume,\n )\n right_volume, right_percentage = get_volume_in_hemisphere(\n atlas_value,\n unique_vals_right,\n counts_right,\n total_volume_voxels,\n voxel_volume,\n )\n if total_volume_voxels is not None:\n total_percentage = left_percentage + right_percentage\n else:\n total_percentage = 0\n\n df = df.append(\n {\n \"structure_name\": name,\n \"left_volume_mm3\": left_volume,\n \"left_percentage_of_total\": left_percentage,\n \"right_volume_mm3\": right_volume,\n \"right_percentage_of_total\": right_percentage,\n \"total_volume_mm3\": left_volume + right_volume,\n \"percentage_of_total\": total_percentage,\n },\n ignore_index=True,\n )\n return df\n\n\ndef get_volume_in_hemisphere(\n atlas_value, unique_vals, counts, total_volume_voxels, voxel_volume\n):\n try:\n index = np.where(unique_vals == atlas_value)[0][0]\n volume = counts[index] * voxel_volume\n if total_volume_voxels is not None:\n percentage = 100 * (counts[index] / total_volume_voxels)\n else:\n percentage = 0\n except IndexError:\n volume = 0\n percentage = 0\n\n return volume, percentage\n\n\ndef convert_and_save_points(\n points_layers,\n output_directory,\n x_scaling,\n y_scaling,\n z_scaling,\n max_z,\n track_file_extension=\".h5\",\n):\n \"\"\"\n Converts the points from the napari format (in image space) to brainrender\n (in atlas space)\n :param points_layers: list of points layers\n :param output_directory: path to save points to\n :param x_scaling: scaling from image space to brainrender scene\n :param y_scaling: scaling from image space to brainrender scene\n :param z_scaling: scaling from image space to brainrender scene\n :param max_z: Maximum extent of the image in z\n \"\"\"\n\n output_directory = Path(output_directory)\n ensure_directory_exists(output_directory)\n\n for points_layer in points_layers:\n save_single_track_layer(\n points_layer,\n output_directory,\n x_scaling,\n y_scaling,\n z_scaling,\n max_z,\n track_file_extension=track_file_extension,\n )\n\n\ndef save_single_track_layer(\n layer,\n output_directory,\n x_scaling,\n y_scaling,\n z_scaling,\n max_z,\n track_file_extension=\".h5\",\n):\n output_filename = output_directory / (layer.name + track_file_extension)\n cells = layer.data.astype(np.int16)\n cells = pd.DataFrame(cells)\n\n cells.columns = [\"x\", \"y\", \"z\"]\n\n # weird scaling due to the ARA coordinate space\n cells[\"x\"] = max_z - cells[\"x\"]\n cells[\"x\"] = z_scaling * cells[\"x\"]\n cells[\"z\"] = x_scaling * cells[\"z\"]\n cells[\"y\"] = y_scaling * cells[\"y\"]\n cells.to_hdf(output_filename, key=\"df\", 
mode=\"w\")\n\n\ndef analyse_track(\n scene,\n points_file,\n add_surface_to_points=True,\n spline_points=100,\n fit_degree=3,\n spline_smoothing=0.05,\n point_radius=30,\n spline_radius=10,\n verbose=True,\n):\n \"\"\"\n Given a file of points, fit a spline function, and add to a brainrender\n scene.\n :param scene: brainrender scene object\n :param points_file:\n :param bool add_surface_to_points: Add the closest part of the brain\n surface to the list of points\n :param spline_points: How many points define the spline\n :param fit_degree: spline fit degree\n :param spline_smoothing: spline fit smoothing\n :param point_radius: size of the points in the brainrender scene\n :param spline_radius: size of the rendered spline in the brainrender\n scene\n :param bool verbose: Whether to print the progress\n :return:\n scene: brainrender scene with the surface point added.\n spline: vedo spline object\n \"\"\"\n points = pd.read_hdf(points_file)\n scene.add_cells(\n points,\n color_by_region=True,\n res=12,\n radius=point_radius,\n verbose=False,\n )\n points = np.array(points)\n\n if add_surface_to_points:\n scene, points = add_surface_point_to_points(\n scene, points, point_radius, verbose=verbose\n )\n\n far_point = np.expand_dims(points[-1], axis=0)\n scene.add_vtkactor(Spheres(far_point, r=point_radius).color(\"n\"))\n\n spline = (\n Spline(\n points,\n smooth=spline_smoothing,\n degree=fit_degree,\n res=spline_points,\n )\n .pointSize(spline_radius)\n .color(\"n\")\n )\n\n return scene, spline\n\n\ndef add_surface_point_to_points(\n scene, points, point_radius, color=\"n\", verbose=True\n):\n \"\"\"\n Adds the closest part of the brain surface to the list of points. Returns\n the brainrender scene with the point added, and the point added to the\n list of points\n :param scene: brainrender scene object\n :param points: List of points\n :param point_radius: Radius of the point when displayed\n :param bool verbose: Whether to print the progress\n :return:\n scene: brainrender scene with the surface point added.\n points: list of points with the surface point added.\n \"\"\"\n if verbose:\n print(\n \"Finding the closest point on the brain surface to the first point\"\n )\n root_mesh = mesh.Mesh(scene.root)\n surface_intersection = np.expand_dims(\n root_mesh.closestPoint(points[0]), axis=0\n )\n points = np.concatenate([surface_intersection, points], axis=0)\n scene.add_vtkactor(\n Spheres(surface_intersection, r=point_radius).color(color)\n )\n return scene, points\n\n\ndef analyse_track_anatomy(scene, spline, file_path, verbose=True):\n \"\"\"\n For a given spline, and brainrender scene, find the brain region that each\n \"segment\" is in, and save to csv.\n\n :param scene: brainrender scene object\n :param spline: vtkplotter spline object\n :param file_path: path to save the results to\n :param bool verbose: Whether to print the progress\n \"\"\"\n if verbose:\n print(\"Determining the brain region for each segment of the spline\")\n spline_regions = [\n scene.atlas.get_structure_from_coordinates(p, just_acronym=False)\n for p in spline.points().tolist()\n ]\n\n df = pd.DataFrame(\n columns=[\"Position\", \"Region ID\", \"Region acronym\", \"Region name\"]\n )\n for idx, spline_region in enumerate(spline_regions):\n if spline_region is None:\n df = df.append(\n {\n \"Position\": idx,\n \"Region ID\": \"Not found in brain\",\n \"Region acronym\": \"Not found in brain\",\n \"Region name\": \"Not found in brain\",\n },\n ignore_index=True,\n )\n else:\n df = df.append(\n {\n \"Position\": 
idx,\n                    \"Region ID\": spline_region[\"id\"],\n                    \"Region acronym\": spline_region[\"acronym\"],\n                    \"Region name\": spline_region[\"name\"],\n                },\n                ignore_index=True,\n            )\n    if verbose:\n        print(f\"Saving results to: {file_path}\")\n    df.to_csv(file_path, index=False)\n\n\ndef display_track_in_brainrender(\n    scene, spline, regions_to_add=[], region_alpha=0.3, verbose=True\n):\n    \"\"\"\n\n    :param scene: brainrender scene object\n    :param spline: vtkplotter spline object\n    :param regions_to_add: List of additional brain regions to add, as a list\n    of acronyms\n    :param region_alpha: Opacity of the displayed regions\n    :param bool verbose: Whether to print the progress\n    \"\"\"\n    if verbose:\n        print(\"Visualising 3D data in brainrender\")\n    scene.add_vtkactor(spline)\n    scene.add_brain_regions(regions_to_add, alpha=region_alpha)\n    scene.verbose = False\n    return scene\n\n\ndef view_in_brainrender(\n    scene,\n    spline,\n    regions_directory,\n    alpha=0.8,\n    shading=\"flat\",\n    region_to_add=[],\n    region_alpha=0.3,\n):\n    obj_files = glob(str(regions_directory) + \"/*.obj\")\n    if obj_files:\n        scene = load_regions_into_brainrender(\n            scene, obj_files, alpha=alpha, shading=shading\n        )\n    try:\n        scene = display_track_in_brainrender(\n            scene,\n            spline,\n            regions_to_add=region_to_add,\n            region_alpha=region_alpha,\n        )\n    except:\n        pass\n\n    scene.render()\n\n\ndef add_new_track_layer(viewer, track_layers, point_size):\n    num = len(track_layers)\n    new_track_layers = viewer.add_points(\n        n_dimensional=True, size=point_size, name=f\"track_{num}\",\n    )\n    new_track_layers.mode = \"ADD\"\n    track_layers.append(new_track_layers)\n\n\ndef add_new_region_layer(\n    viewer, label_layers, image_like, brush_size, num_colors\n):\n    num = len(label_layers)\n    new_label_layer = add_new_label_layer(\n        viewer,\n        image_like,\n        name=f\"region_{num}\",\n        brush_size=brush_size,\n        num_colors=num_colors,\n    )\n    new_label_layer.mode = \"PAINT\"\n    label_layers.append(new_label_layer)\n\n\ndef add_existing_region_segmentation(\n    directory, viewer, label_layers, file_extension, memory=False\n):\n    label_files = glob(str(directory) + \"/*\" + file_extension)\n    if directory and label_files != []:\n        for label_file in label_files:\n            label_layers.append(\n                add_existing_label_layers(viewer, label_file, memory=memory)\n            )\n","sub_path":"neuro/segmentation/manual_segmentation/man_seg_tools.py","file_name":"man_seg_tools.py","file_ext":"py","file_size_in_byte":18422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"26365157","text":"#!/usr/bin/env python\r\n\r\nimport argparse\r\nimport cairo\r\nfrom random import random\r\nimport re\r\n\r\n# Global constants for image output\r\nLEFT_MARGIN = 20\r\nGENE_HEIGHT = 100\r\nVERTICAL_BUFFER = GENE_HEIGHT / 5\r\n\r\nclass GeneGroup:\r\n    ''' Object to hold other objects. One GeneGroup for each fasta sequence in the input.'''\r\n    def __init__(self):\r\n        self.gene = Gene()\r\n        self.exon = Exon()\r\n        self.header = FastaHeader()\r\n        self.motifs = []\r\n        self.number = 0\r\n\r\n\r\nclass FastaHeader():\r\n    '''FASTA sequence header name. Contains method for writing visual output above each\r\n    gene image.'''\r\n    def __init__(self):\r\n        self.name = \"\"\r\n    def draw(self, context, gene_count):\r\n        x = LEFT_MARGIN\r\n        y = GENE_HEIGHT * gene_count\r\n        context.set_source_rgb(0,0,0)\r\n        context.move_to(x,y)\r\n        context.set_font_size(10)\r\n        context.show_text(self.name)\r\n\r\n\r\nclass Exon:\r\n    '''Exon object stores location of exon in FASTA sequence. 
Can handle multiple exons.\r\n    Also contains function for visualizing exons as dark boxes in output'''\r\n    def __init__(self):\r\n        self.locations = []\r\n    def draw(self, context, gene_number):\r\n        for location in self.locations:\r\n            start, stop = location\r\n            x = LEFT_MARGIN + start\r\n            y = GENE_HEIGHT * gene_number + VERTICAL_BUFFER*2\r\n            width = stop - start\r\n            height = VERTICAL_BUFFER\r\n            context.set_source_rgb(0,0,0)\r\n            context.rectangle(x,y,width,height)\r\n            context.fill()\r\n\r\n\r\nclass Gene:\r\n    '''The gene sequence. Finds its width and scales the output image size accordingly.\r\n    Also contains method to write gene visualization as a line.'''\r\n    def __init__(self):\r\n        self.sequence = \"\"\r\n        self.width = 0\r\n    def append_sequence(self, seq):\r\n        self.sequence += seq\r\n        self.width = len(self.sequence)\r\n    def draw_gene(self, context, gene_number):\r\n        x = LEFT_MARGIN\r\n        y = GENE_HEIGHT * gene_number + (GENE_HEIGHT / 2)\r\n        context.set_source_rgb(0,0,0)\r\n        context.set_line_width(1)\r\n        context.move_to(x,y)\r\n        context.line_to(x + self.width, y)\r\n        context.stroke()\r\n\r\n\r\nclass Motif:\r\n    '''Object for each possible motif for each FASTA entry. If there are 4 motifs being\r\n    searched for, each entry can have 4 motif objects. The motif object stores all \r\n    instances of that motif in that sequence. Visualizes motifs in the output as colored\r\n    boxes.'''\r\n    def __init__(self):\r\n        self.sequence = \"\"\r\n        self.locations = []\r\n    def draw_motif(self, context, gene_count, pallete):\r\n        for location in self.locations:\r\n            start, stop = location\r\n            width = stop - start\r\n            x = LEFT_MARGIN + start\r\n            y = GENE_HEIGHT * gene_count + (GENE_HEIGHT/2) - 20\r\n            height = 40\r\n            r,g,b,a = pallete[self.sequence]\r\n            context.set_source_rgba(r,g,b,0.7)\r\n            context.set_line_width(1)\r\n            context.move_to(x,y)\r\n            context.rectangle(x, y, width, height)\r\n            context.fill()\r\n            context.stroke()\r\n\r\n\r\ndef get_args():\r\n    '''\r\n    Takes user arguments at runtime. One argument for the input fasta file to scan, one for the\r\n    file of motifs to search for, and an optional flag to randomly generate motif colors. \r\n    '''\r\n    parser = argparse.ArgumentParser()\r\n    parser.add_argument('-f', \"--file\", type=str, help=\"The input sequences in fasta format\", required=True)\r\n    parser.add_argument('-m', \"--motifs\", type=str, help=\"Text file containing motifs, 1 per line\", required=True)\r\n    parser.add_argument('-c', \"--color_randomizer\", help=\"Optional flag to randomly generate colors. 
Needed for inputs of >10 motifs\", action=\"store_true\")\r\n return parser.parse_args()\r\n\r\n\r\ndef get_motifs(motifs):\r\n ''' Translates the provided motifs into Regex search patterns using IUPAC nucleotide codes'''\r\n iupac_dict = {\r\n \"A\":\"[Aa]\",\r\n \"C\":\"[Cc]\",\r\n \"G\":\"[Gg]\",\r\n \"T\":\"[TtUu]\",\r\n \"U\":\"[UuTt]\",\r\n \"W\":\"[AaTtUu]\",\r\n \"S\":\"[CcGg]\",\r\n \"M\":\"[AaCc]\",\r\n \"K\":\"[GgTtUu]\",\r\n \"R\":\"[AaGg]\",\r\n \"Y\":\"[CcTtUu]\",\r\n \"B\":\"[CcGgTtUu]\",\r\n \"D\":\"[AaGgTtUu]\",\r\n \"H\":\"[AaCcTtUu]\",\r\n \"V\":\"[AaCcGg]\",\r\n \"N\":\"[AaCcGgTtUu]\",\r\n \"Z\":\"[]\",\r\n }\r\n translated_motifs = {}\r\n motif_number = {}\r\n motif_count = 1\r\n with open(motifs, \"r\") as fh:\r\n for line in fh:\r\n motif = line.strip()\r\n motif_number[motif_count] = motif\r\n translated_motif = \"\"\r\n for character in motif.upper():\r\n translated_motif += iupac_dict[character]\r\n translated_motifs[motif] = translated_motif\r\n motif_count += 1\r\n #outputs the regex pattern for the motif and a key to turn the regex string back into the original motif\r\n return motif_number, translated_motifs\r\n\r\n\r\ndef exon_finder(sequence):\r\n '''\r\n Finds exons in fasta sequences by looking for capitalized nucleotides. Returns the start and\r\n stop positions of capitialized stretches within the sequence.\r\n '''\r\n iterator = re.finditer(\"[A-Z]+\", sequence)\r\n # returns start and stop position, 0 indexed\r\n exons = []\r\n for match in iterator:\r\n exons.append(match.span())\r\n # exons as list of tuples with start stop position\r\n return exons\r\n\r\n\r\ndef motif_finder(sequence, motif):\r\n '''\r\n Finds regions in a sequence that match a motif. Return the start and stop positions \r\n of each matching region. 
\r\n    '''\r\n    #Number of nucleotides in motif\r\n    motif_len = len(re.findall(\"\\[[a-zA-Z]*\\]\", motif))\r\n    #number of nucleotides in query sequence\r\n    seq_len = len(sequence)\r\n    #used for windows larger than the motif sequence\r\n    still_in_motif = False\r\n    motif_count = 0\r\n    #store start and stop locations for each motif\r\n    motif_locations = {}\r\n    for i in range(0, seq_len-motif_len+1):\r\n        sliding_window = sequence[i:i+motif_len]\r\n        if re.fullmatch(motif, sliding_window):\r\n            #sliding window in fasta matches the motif\r\n            if not still_in_motif:\r\n                #first instance of motif\r\n                #adjusting positions for 0 base counting\r\n                motif_start = i + 1\r\n                motif_end = i + 1 + motif_len\r\n                still_in_motif = True\r\n                motif_count += 1\r\n            elif still_in_motif:\r\n                #consecutive encounters of this motif\r\n                #motif window extends beyond length of motif, update the end position\r\n                motif_end = i + 1 + motif_len\r\n        elif still_in_motif:\r\n            #end of motif window, store bounds\r\n            motif_locations[motif_count] = [motif_start, motif_end]\r\n            still_in_motif = False\r\n    if still_in_motif:\r\n        #motif ran to the end of the sequence, store the final bounds\r\n        motif_locations[motif_count] = [motif_start, motif_end]\r\n    #return just the positions, as only 1 motif is provided at a time\r\n    return [(locations) for number, locations in motif_locations.items()]\r\n\r\n\r\ndef generate_pallete(motifs, randomizer):\r\n    '''\r\n    Create list of pre-approved RGB values for each motif, to be used in graphical representation.\r\n    If the user opts for random colors, they are randomly generated.\r\n    '''\r\n    pre_approved_colors = [[0.94, 0.57, 0.91, 1],\r\n                    [0.72, 0.89, 0.43, 1],\r\n                    [0.48, 0.8, 0.76, 1],\r\n                    [0.16, 0.54, 0.74, 1],\r\n                    [0.70, 0.34, 0.02, 1],\r\n                    [0.87, 0.50, 0.07, 1],\r\n                    [0.99, 0.72, 0.38, 1],\r\n                    [0.99, 0.87, 0.74, 1],\r\n                    [0.84, 0.85, 0.92, 1],\r\n                    [0.69, 0.67, 0.82, 1],\r\n                    [0.50, 0.45, 0.67, 1],\r\n                    [0.32, 0.15, 0.53,1],]\r\n    colors = {}\r\n    motif_count = 0\r\n    for motif in motifs:\r\n        if not randomizer:\r\n            colors[motif] = pre_approved_colors[motif_count]\r\n        else:\r\n            r,g,b,a = random(), random(), random(), 0.7\r\n            colors[motif] = [r,g,b,a]\r\n        motif_count += 1 \r\n    return colors\r\n\r\n\r\ndef create_genes(gene_fasta, motif_number, motif_regex):\r\n    '''\r\n    Iterates through the input FASTA. 
For each record, the objects are created.\r\n    No output is generated at this time, just getting all information.\r\n    '''\r\n    #initialize dictionary of gene objects\r\n    gene_groups = {}\r\n    gene_count = 0\r\n    with open(gene_fasta, \"r\") as fh:\r\n        #iterating through input fasta\r\n        for line in fh:\r\n            if line[0] == \">\":\r\n                #New fasta entry for a gene\r\n                gene_count += 1\r\n                #define new object\r\n                gene_groups[gene_count] = GeneGroup()\r\n                #add the gene name\r\n                gene_groups[gene_count].header.name = line.strip()\r\n                #which gene is this in the fasta\r\n                gene_groups[gene_count].number = gene_count\r\n            else:\r\n                #add the fasta sequence to the gene object\r\n                gene_groups[gene_count].gene.append_sequence(line.strip())\r\n    gene_count = 0\r\n    for a in gene_groups:\r\n        #iterating through gene objects\r\n        gene_group = gene_groups[a]\r\n        #find the exons in the gene's sequence\r\n        exons = exon_finder(gene_group.gene.sequence)\r\n        #store exon locations in gene_group object\r\n        gene_group.exon.locations = exons\r\n        #loop through motifs\r\n        for motif_count in motif_number:\r\n            #ACTG motif\r\n            standard_motif = motif_number[motif_count]\r\n            #Regex search string\r\n            regex_motif = motif_regex[standard_motif]\r\n            #list of BP positions for this motif in this gene\r\n            motif_locations = motif_finder(gene_group.gene.sequence, regex_motif)\r\n            if motif_locations:\r\n                #if motif appears at least once in sequence, store it in object\r\n                motif = Motif()\r\n                motif.sequence = standard_motif\r\n                motif.locations = motif_locations\r\n                #add the motif to a list for that gene_group\r\n                gene_group.motifs.append(motif)\r\n        gene_count += 1\r\n    return gene_groups\r\n\r\n\r\ndef drawing(gene_objects, output_file, pallete, input_file, motif_number):\r\n    '''\r\n    With gene objects generated, now we iterate through the GeneGroup objects\r\n    to visualize all the child objects in the groups. At the end, it calls\r\n    add_legend to add the figure legend.\r\n    '''\r\n    #figure out how wide to make the output based on the input gene lengths\r\n    output_width = 0\r\n    for genes in gene_objects:\r\n        if gene_objects[genes].gene.width > output_width:\r\n            output_width = gene_objects[genes].gene.width\r\n    with cairo.SVGSurface(output_file, output_width + 2*LEFT_MARGIN, (len(gene_objects) +2) * GENE_HEIGHT) as surface:\r\n        context = cairo.Context(surface)\r\n        #loop through gene_groups\r\n        gene_count = 0\r\n        for count in gene_objects:\r\n            gene_count += 1\r\n            gene_group = gene_objects[count]\r\n            gene_group.gene.draw_gene(context, count)\r\n            gene_group.exon.draw(context, count)\r\n            gene_group.header.draw(context, count)\r\n            for motif in gene_group.motifs:\r\n                motif.draw_motif(context, count, pallete)\r\n        add_legend(context, motif_number, gene_count+1, pallete)\r\n\r\n\r\ndef add_legend(context, motif_number, gene_count, pallete):\r\n    '''\r\n    Messy, but only needs calling once. 
Adds a visual key at the bottom of the output\r\n    to show what the colored motif boxes correspond to.\r\n    '''\r\n    legend_x, legend_y = LEFT_MARGIN, VERTICAL_BUFFER + GENE_HEIGHT * gene_count\r\n    legend_horiz_spacing = 100\r\n    legend_vert_spacing = GENE_HEIGHT * 0.1\r\n    context.move_to(legend_x, legend_y - legend_vert_spacing)\r\n    context.set_font_size(10)\r\n    context.set_source_rgba(1,1,1,1)\r\n    context.show_text(\"Motif legend\")\r\n    motif_count = 0\r\n    line = 0\r\n    for motif in motif_number:\r\n        #wrapping so 4 motifs appear in each line\r\n        if motif_count % 4 == 0:\r\n            line += 1\r\n        x1, y1 = (legend_x + (motif_count % 4) * legend_horiz_spacing), (legend_y + legend_vert_spacing * line)\r\n        context.move_to(x1, y1)\r\n        #cute lil boxes\r\n        context.rectangle(x1,y1,GENE_HEIGHT*0.05, GENE_HEIGHT * 0.05)\r\n        r,g,b,a = pallete[motif_number[motif]]\r\n        context.set_source_rgba(r,g,b,a)\r\n        context.fill()\r\n        #write the original motif\r\n        x1, y1 = x1 + GENE_HEIGHT * 0.07, y1 + GENE_HEIGHT * 0.07\r\n        context.move_to(x1,y1)\r\n        context.set_source_rgb(0,0,0)\r\n        context.show_text(motif_number[motif].upper())\r\n        motif_count += 1\r\n\r\n\r\ndef main():\r\n    #collect user parameters\r\n    args = get_args()\r\n    #get the regex form of motifs for searching \r\n    motif_number, motif_regex = get_motifs(args.motifs)\r\n    #making a color pallete for visual representation of motifs\r\n    pallete = generate_pallete(motif_number.values(), args.color_randomizer)\r\n    #create gene objects\r\n    gene_objects = create_genes(args.file, motif_number, motif_regex)\r\n    #the name of the file to output\r\n    output_name = args.file.split(\".\")[0]\r\n    output_name += \".svg\"\r\n    #passing in gene objects, color pallete, filenames, etc into drawing function for visual output\r\n    drawing(gene_objects, output_name, pallete, args.file, motif_number)\r\n\r\n\r\nmain()","sub_path":"oop_motifmark.py","file_name":"oop_motifmark.py","file_ext":"py","file_size_in_byte":13372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"477142026","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport math as math\n\n#---------Setting the seed of the generator to get consistent tests-------------\nnp.random.seed(10)\n\n#---------Setting the problem parameters that suit the problem------------------\nntime = 100000\nnreal = 100000\n\nstepright = 1\nstepleft = -1\n\npright = float(0.6)\npleft = float(0.4)\n\n#---------Creating an (nreal, ntime) array with random integers from (0-9)------\nd0 = np.random.randint(0,10,(nreal,ntime))\n\n#------------Changing the values of d0 to get weighted movements----------------\nd0[d0 < 6] = stepright\nd0[d0 >= 6] = stepleft\n\ntestd0 = np.insert(d0,0,0,axis=1)\n\n#-----------------------Calculating the mean------------------------------------\ntest1 = np.cumsum(testd0,axis = 1)\n\nnt = np.arange(ntime+1)\n\navg = np.sum(test1, axis = 0)/float(nreal)\n\ndistavg = nt*(2*(.6) - 1)\n\n#-----------------------Square mean value---------------------------------------\nsqavg = np.sum(test1**2, axis = 0)/float(nreal)\n\n#-----------------------Calculating the Variance--------------------------------\nvar = sqavg - avg**2\n\ndistvar = np.square(nt)*(4*(pright**2) - 4*pright + 1) + 4*nt*pright*pleft - np.square(distavg)\n\n#---------------------Creating and saving the plot------------------------------\nfig = plt.figure(1, figsize = (5.0,5.0))\n\n#--------------------Plotting a sample path for reference of walk---------------\nplt.subplot(211)\nplt.title('1D Random 
Walk 1st Path', fontsize=14)\nplt.xlabel('Step #', fontsize = 12)\nplt.ylabel('Position', fontsize = 12)\nplt.plot(test1[1], 'm')\n\n#-------------------Plotting the mean/var from data and distributions-----------\nplt.subplot(212)\nplt.title('AVG/VAR(from data), AVG/VAR from dist.', fontsize=14)\nplt.xlabel('Step #', fontsize = 12)\nplt.ylabel('Position', fontsize = 12)\nplt.plot(avg,'r')\nplt.plot(var,'b')\nplt.plot(distavg, 'r--')\nplt.plot(distvar, 'b--')\n\n#-----------------------------adjusting subplots--------------------------------\nplt.subplots_adjust(hspace=0.5, wspace=0.1)\n#plt.show()\n#-----------------------------Saving the figure---------------------------------\nfig.savefig(\"/Users/Parker/Desktop/Walks/Newplots/1DRW100000WNew.png\", format = \"png\", dpi=800)\n","sub_path":"1DRW100000WNEW.py","file_name":"1DRW100000WNEW.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"108286494","text":"from .bounding_box_attachment import BoundingBoxAttachment\nfrom .skinned_mesh_attachment import SkinnedMeshAttachment\nfrom .attachment_loader import AttachmentLoader\nfrom .region_attachment import RegionAttachment\nfrom .mesh_attachment import MeshAttachment\n\n\nclass AtlasAttachmentLoader(AttachmentLoader):\n    def __init__(self, atlas):\n        self.atlas = atlas\n\n    def new_region_attachment(self, skin, name, path):\n        region = self.atlas.find_region(path)\n        if region is None:\n            raise RuntimeError(f\"Region not found in atlas: {path} (region attachment: {name})\")\n        attachment = RegionAttachment(name)\n        attachment.region = region\n        return attachment\n\n    def new_mesh_attachment(self, skin, name, path):\n        region = self.atlas.find_region(path)\n        if region is None:\n            raise RuntimeError(f\"Region not found in atlas: {path} (mesh attachment: {name})\")\n        attachment = MeshAttachment(name)\n\n        attachment.region = region\n        return attachment\n\n    def new_skinned_mesh_attachment(self, skin, name, path):\n        region = self.atlas.find_region(path)\n        if region is None:\n            raise RuntimeError(f\"Region not found in atlas: {path} (skinned mesh attachment: {name})\")\n\n        attachment = SkinnedMeshAttachment(name)\n\n        attachment.region = region\n        return attachment\n\n    def new_bounding_box_attachment(self, skin, name):\n        return BoundingBoxAttachment(name)\n","sub_path":"spine/libspine/attachments/atlas_attachment_loader.py","file_name":"atlas_attachment_loader.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"358572775","text":"import unittest\nfrom tests import no_jython\nfrom voom.amqp.config import AMQPQueueDescriptor\nfrom voom.amqp.gateway import AMQPGateway\nimport pika\nfrom voom.bus import VoomBus\nfrom voom.codecs import ContentCodecRegistry\nfrom voom.codecs.json_codec import JSONMessageCodec\nfrom voom.amqp.events import AMQPDataReceived, AMQPGatewayReady\nfrom voom.decorators import receiver\nfrom voom.gateway import GatewayShutdownCmd\nfrom logging import basicConfig, getLogger\nimport threading\nimport logging\n\nbasicConfig(level=logging.INFO)\n\nLOG = getLogger(__name__)\n\n\nclass TestMultThread(unittest.TestCase):\n    @no_jython\n    def test_send_other_thread(self):\n        self.rlock = threading.RLock()\n        self.rlock.acquire()\n\n        def send_on_ready():\n            LOG.info(\"waiting for lock...\")\n            with self.rlock:\n                LOG.info(\"lock acquired, sending\")\n                properties = pika.BasicProperties(content_type='application/json',\n                                                  
reply_to=g.return_queue.queue)\n self.g.send([range(0, 10)], properties, exchange='', routing_key=work.queue)\n\n LOG.info(\"lock released\")\n\n t = threading.Thread(target=send_on_ready)\n t.daemon = True\n t.start()\n\n work = AMQPQueueDescriptor(\"test_multithread\", declare=True, exclusive=False, auto_delete=True)\n\n g = AMQPGateway(work.queue,\n pika.ConnectionParameters(host='localhost'),\n [work],\n VoomBus(),\n ContentCodecRegistry(JSONMessageCodec()))\n\n self.g = g\n bus = g.bus\n bus.raise_errors = True\n self.msgs = []\n\n @receiver(AMQPDataReceived)\n def receives(msg):\n assert isinstance(msg, AMQPDataReceived)\n self.msgs.append(msg)\n bus.publish(GatewayShutdownCmd())\n\n @receiver(AMQPGatewayReady)\n def on_ready(msg):\n LOG.info(\"releasing...\")\n self.rlock.release()\n\n bus.register(receives)\n bus.register(on_ready)\n\n g.run()\n assert len(self.msgs) == 1\n msg = self.msgs.pop(0)\n assert isinstance(msg, AMQPDataReceived)\n assert msg.headers['From'] == g.return_queue.queue\n assert msg.headers['Content-Type'] == 'application/json'\n assert msg.headers['Reply-To'] == g.return_queue.queue\n assert msg.headers['Routing-Key'] == work.queue\n assert msg.messages == [range(10)]\n","sub_path":"tests/amqp/test_gateway_multithread.py","file_name":"test_gateway_multithread.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"6515110","text":"# coding: utf-8\n\nimport pytest\nimport json\nfrom aiohttp import web\n\nfrom api_server.models.async_response import AsyncResponse\nfrom api_server.models.duty_cost_details import DutyCostDetails\n\n\nasync def test_send_duty_costing(client):\n \"\"\"Test case for send_duty_costing\n\n \n \"\"\"\n body = {\n \"amount\" : 0.8008281904610115,\n \"costingType\" : \"estimated\"\n}\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/api/duty/v1/duties/{id}/costing'.format(id='id_example'),\n headers=headers,\n json=body,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')\n\n","sub_path":"Core/duty/python-aiohttp/tests/test_costing_controller.py","file_name":"test_costing_controller.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"105974103","text":"#!/usr/bin/env python3\n\nimport sys\nsys.path.append('lib') # noqa\n\nfrom ops.charm import CharmBase\nfrom ops.main import main\nfrom ops.model import (\n ActiveStatus,\n BlockedStatus,\n MaintenanceStatus,\n WaitingStatus,\n)\n\nfrom db_instance_manager import DbInstanceManager\nfrom cluster import CockroachDbCluster\n\n\nclass CockroachDbCharm(CharmBase):\n\n PSQL_PORT = 26257\n HTTP_PORT = 8080\n\n # A type to use for the database instance manager. 
The class attribute is\n    # used to inject a different type during unit testing.\n    instance_manager_cls = DbInstanceManager\n\n    def __init__(self, framework, key):\n        super().__init__(framework, key)\n\n        self.framework.observe(self.on.install, self._on_install)\n        self.framework.observe(self.on.start, self._on_start)\n        self.framework.observe(self.on.config_changed, self._on_config_changed)\n        self.framework.observe(self.on.cluster_relation_changed, self._on_cluster_relation_changed)\n\n        self.cluster = CockroachDbCluster(self, 'cluster')\n        self.instance_manager = self.instance_manager_cls(\n            self, None, self.is_single_node, self.cluster)\n        self.framework.observe(self.instance_manager.on.cluster_initialized,\n                               self.cluster.on_cluster_initialized)\n\n        self.framework.observe(self.instance_manager.on.daemon_started, self._on_daemon_started)\n\n    def _on_install(self, event):\n        self.instance_manager.install()\n\n    @property\n    def is_single_node(self):\n        \"\"\"Both replication factors were set to 1 so it's a good guess that an operator wants\n        a 1-node deployment.\"\"\"\n        default_zone_rf = self.model.config['default-zone-replicas']\n        system_data_rf = self.model.config['system-data-replicas']\n        return default_zone_rf == 1 and system_data_rf == 1\n\n    def _on_start(self, event):\n        # If both replication factors are set to 1 and the current unit != initial cluster unit,\n        # don't start the process if the cluster has already been initialized.\n        # This configuration is not practical in real deployments (i.e. multiple units, RF=1).\n        initial_unit = self.cluster.initial_unit\n        if self.is_single_node and (\n                initial_unit is not None and self.unit.name != initial_unit):\n            self.unit.status = BlockedStatus('Extra unit in a single-node deployment.')\n            return\n        self.instance_manager.start()\n\n        if self.cluster.is_joined and self.cluster.is_cluster_initialized:\n            self.unit.status = ActiveStatus()\n\n    def _on_cluster_relation_changed(self, event):\n        self.instance_manager.reconfigure()\n        if self.instance_manager.is_started and self.cluster.is_cluster_initialized:\n            self.unit.status = ActiveStatus()\n\n    def _on_daemon_started(self, event):\n        if not self.cluster.is_joined and not self.is_single_node:\n            self.unit.status = WaitingStatus('Waiting for peer units to join.')\n            event.defer()\n            return\n        if self.cluster.is_cluster_initialized:\n            # Skip this event when some other unit has already initialized a cluster.\n            self.unit.status = ActiveStatus()\n            return\n        elif not self.unit.is_leader():\n            self.unit.status = WaitingStatus(\n                'Waiting for the leader unit to initialize a cluster.')\n            event.defer()\n            return\n        self.unit.status = MaintenanceStatus('Initializing the cluster.')\n        # Initialize the cluster if we're a leader in a multi-node deployment, otherwise it has\n        # already been initialized by running start-single-node.\n        if not self.is_single_node and self.model.unit.is_leader():\n            self.instance_manager.init_db()\n\n        self.unit.status = ActiveStatus()\n\n    def _on_config_changed(self, event):\n        self.instance_manager.reconfigure()\n\n\nif __name__ == '__main__':\n    main(CockroachDbCharm)\n","sub_path":"src/charm.py","file_name":"charm.py","file_ext":"py","file_size_in_byte":3969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"393590951","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\nclass Solution:\n    def rotate(self,node,k):\n        if not node or not node.next:\n            return node\n        num=1\n        p=node\n        
while p.next:\n            num+=1\n            p=p.next\n\n        #find the last node of the first segment\n        k=num-k%num\n        p=node\n        while k>1:\n            p=p.next\n            k-=1\n        head=p.next\n        if not head:\n            #k was a multiple of num, so the rotation is the identity\n            return node\n        #set the tail of the first segment to None\n        p.next=None\n\n        #join the two lists\n        p=head\n        while p.next:\n            p=p.next\n        p.next=node\n        return head\n\n","sub_path":"61rotateRight/rotateRight.py","file_name":"rotateRight.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"27097696","text":"import datetime\nimport re\nimport urllib.parse\nfrom typing import Optional\n\nfrom tibiapy import abc\nfrom tibiapy.enums import NewsCategory, NewsType\nfrom tibiapy.errors import InvalidContent\nfrom tibiapy.utils import parse_tibia_date, parse_tibiacom_content, try_enum\n\n__all__ = (\n    \"News\",\n    \"ListedNews\",\n)\n\n\nICON_PATTERN = re.compile(r\"newsicon_([^_]+)_(?:small|big)\")\n\n\nclass News(abc.BaseNews, abc.Serializable):\n    \"\"\"Represents a news entry.\n\n    Attributes\n    ----------\n    id: :class:`int`\n        The internal ID of the news entry.\n    title: :class:`str`\n        The title of the news entry.\n    category: :class:`NewsCategory`\n        The category this belongs to.\n    category_icon: :class:`str`\n        The URL of the icon corresponding to the category.\n    date: :class:`datetime.date`\n        The date when the news were published.\n    content: :class:`str`, optional\n        The raw html content of the entry.\n    thread_id: :class:`int`, optional\n        The thread id of the designated discussion thread for this entry.\n    \"\"\"\n    def __init__(self, news_id, title, content, date, category, **kwargs):\n        self.id: int = news_id\n        self.title: str = title\n        self.content: str = content\n        self.date: datetime.date = date\n        self.category: NewsCategory = category\n        self.thread_id: Optional[int] = kwargs.get(\"thread_id\", None)\n        self.category_icon: Optional[str] = kwargs.get(\"category_icon\")\n\n    # id, title, category and date inherited from BaseNews.\n    __slots__ = (\n        \"id\",\n        \"title\",\n        \"category\",\n        \"category_icon\",\n        \"date\",\n        \"content\",\n        \"thread_id\",\n    )\n\n    @property\n    def thread_url(self):\n        \"\"\":class:`str`: The URL to the thread discussing this news entry, if any.\"\"\"\n        return abc.BaseThread.get_url(self.thread_id) if self.thread_id else None\n\n    @classmethod\n    def from_content(cls, content, news_id=0):\n        \"\"\"\n        Gets a news entry by its HTML content from Tibia.com\n\n        Notes\n        -----\n        Since there's no way to obtain the entry's Id from the page contents, it will always be 0.\n        A news_id can be passed to set the news_id of the resulting object.\n\n        Parameters\n        ----------\n        content: :class:`str`\n            The HTML content of the page.\n        news_id: :class:`int`, optional\n            The news_id belonging to the content being parsed.\n\n        Returns\n        -------\n        :class:`News`\n            The news article found in the page.\n\n        Raises\n        ------\n        InvalidContent\n            If content is not the HTML of a news' page.\n        \"\"\"\n        if \"(no news with id \" in content:\n            return None\n        try:\n            parsed_content = parse_tibiacom_content(content)\n            # Read Information from the headline\n            headline = parsed_content.find(\"div\", attrs={\"class\": \"NewsHeadline\"})\n            img = headline.find('img')\n            img_url = img[\"src\"]\n            category_name = ICON_PATTERN.search(img_url)\n            category = try_enum(NewsCategory, category_name.group(1))\n            title_div = headline.find(\"div\", attrs={\"class\": \"NewsHeadlineText\"})\n            title = title_div.text.replace('\xa0', ' ')\n            date_div = headline.find(\"div\", attrs={\"class\": \"NewsHeadlineDate\"})\n            date_str = date_div.text.replace('\xa0', ' ').replace('-', 
'').strip()\n date = parse_tibia_date(date_str)\n\n # Read the page's content.\n content_table = parsed_content.find(\"table\")\n content_row = content_table.find(\"td\")\n content = content_row.encode_contents().decode()\n thread_id = None\n thread_div = content_table.find(\"div\")\n if thread_div:\n news_link = thread_div.find('a')\n url = urllib.parse.urlparse(news_link[\"href\"])\n query = urllib.parse.parse_qs(url.query)\n thread_id = int(query[\"threadid\"][0])\n\n return cls(news_id, title, content, date, category, thread_id=thread_id, category_icon=img_url)\n except AttributeError:\n raise InvalidContent(\"content is not from the news archive section in Tibia.com\")\n\n\nclass ListedNews(abc.BaseNews, abc.Serializable):\n \"\"\"Represents a news entry.\n\n Attributes\n ----------\n id: :class:`int`\n The internal ID of the news entry.\n title: :class:`str`\n The title of the news entry.\n News tickers have a fragment of their content as a title.\n category: :class:`NewsCategory`\n The category this belongs to.\n category_icon: :class:`str`\n The URL of the icon corresponding to the category.\n date: :class:`datetime.date`\n The date when the news were published.\n type: :class:`NewsType`\n The type of news of this list entry.\n \"\"\"\n __slots__ = (\n \"id\",\n \"title\",\n \"category\",\n \"category_icon\",\n \"date\",\n \"type\",\n )\n\n def __init__(self, news_id, title, news_type, category, date, **kwargs):\n self.id: int = news_id\n self.title: str = title\n self.type: NewsType = news_type\n self.category: NewsCategory = category\n self.date: datetime.datetime = date\n self.category_icon: Optional[str] = kwargs.get(\"category_icon\", None)\n\n def __repr__(self):\n return f\"<{self.__class__.__name__} id={self.id} title={self.title!r} type={self.type!r} \" \\\n f\"category={self.category!r} date={self.date!r}>\"\n\n @classmethod\n def list_from_content(cls, content):\n \"\"\"\n Gets a list of news from the HTML content of the news search page.\n\n Parameters\n ----------\n content: :class:`str`\n The HTML content of the page.\n\n Returns\n -------\n :class:`list` of :class:`ListedNews`\n List of news in the search results.\n\n Raises\n ------\n InvalidContent\n If content is not the HTML of a news search's page.\n \"\"\"\n try:\n parsed_content = parse_tibiacom_content(content)\n tables = parsed_content.find_all(\"table\", attrs={\"width\": \"100%\"})\n news = []\n news_table = tables[0]\n title_row = news_table.find(\"td\", attrs={\"class\": \"white\", \"colspan\": \"3\"})\n if title_row.text != \"Search Results\":\n raise InvalidContent(\"content is not from the news archive section in Tibia.com\")\n rows = news_table.find_all(\"tr\", attrs={\"class\": [\"Odd\", \"Even\"]})\n for row in rows:\n cols_raw = row.find_all('td')\n if len(cols_raw) != 3:\n continue\n entry = cls._parse_entry(cols_raw)\n news.append(entry)\n return news\n except (AttributeError, IndexError):\n raise InvalidContent(\"content is not from the news archive section in Tibia.com\")\n\n @classmethod\n def _parse_entry(cls, cols_raw):\n img = cols_raw[0].find('img')\n img_url = img[\"src\"]\n category_name = ICON_PATTERN.search(img_url)\n category = try_enum(NewsCategory, category_name.group(1))\n for br in cols_raw[1].find_all(\"br\"):\n br.replace_with(\"\\n\")\n date_str, news_type_str = cols_raw[1].text.splitlines()\n date = parse_tibia_date(date_str)\n news_type_str = news_type_str.replace('\\xa0', ' ')\n news_type = try_enum(NewsType, news_type_str)\n title = cols_raw[2].text\n news_link = 
cols_raw[2].find('a')\n url = urllib.parse.urlparse(news_link[\"href\"])\n query = urllib.parse.parse_qs(url.query)\n news_id = int(query[\"id\"][0])\n return cls(news_id, title, news_type, category, date, category_icon=img_url)\n","sub_path":"tibiapy/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":7834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"511267985","text":"# Loop i from 1 to half the string length; every i that evenly divides len(s) is a possible repeated-substring length.\n# Check whether that substring, repeated len(s)//i times, equals the original string.\n\nclass Solution(object):\n def repeatedSubstringPattern(self, s):\n \"\"\"\n :type s: str\n :rtype: bool\n \"\"\"\n for i in range(1,len(s)//2+1):\n if len(s)%i == 0:\n if s[:i]*(len(s)//i) == s:\n return True\n\n return False\n\n\ndef main():\n s = \"ababab\"\n myResult = Solution()\n print(myResult.repeatedSubstringPattern(s))\n\nif __name__ == '__main__':\n main()\n\n # # dictionary character-count approach (left unfinished):\n # # s = \"ababba\"\n # if len(s)==1:\n # return False\n # dict = {}\n # for i in s:\n # if i not in dict.keys():\n # dict[i] = 1\n # else:\n # count = dict[i]\n # count += 1\n # dict[i] = count\n # print(dict) # {'a': 3, 'b': 3}\n # print(dict.keys()) # dict_keys(['a', 'b'])\n # print(dict.items()) # dict_items([('a', 3), ('b', 3)])\n # print(dict.values()) # dict_values([3, 3])\n # tmp = list(dict.values())\n # print(tmp) # [3, 3]","sub_path":"easy/459-重复的子字符串.py","file_name":"459-重复的子字符串.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"11417325","text":"import configargparse\nimport glob\nimport os\nimport codecs\nimport gc\n\nimport torch\nimport torchtext.vocab\nfrom collections import Counter, OrderedDict\n\nimport onmt.constants as Constants\nimport onmt.opts as opts\nfrom inputters.dataset import get_fields, build_dataset, make_text_iterator_from_file\nfrom utils.logging import init_logger, logger\n\n\ndef save_fields_to_vocab(fields):\n \"\"\"\n Save Vocab objects in Field objects to `vocab.pt` file.\n \"\"\"\n vocab = []\n for k, f in fields.items():\n if f is not None and 'vocab' in f.__dict__:\n f.vocab.stoi = f.vocab.stoi\n vocab.append((k, f.vocab))\n return vocab\n\ndef build_field_vocab(field, counter, **kwargs):\n specials = list(OrderedDict.fromkeys(\n tok for tok in [field.unk_token, field.pad_token, field.init_token,\n field.eos_token]\n if tok is not None))\n field.vocab = field.vocab_cls(counter, specials=specials, **kwargs)\n\ndef merge_vocabs(vocabs, vocab_size=None, min_frequency=1):\n merged = sum([vocab.freqs for vocab in vocabs], Counter())\n return torchtext.vocab.Vocab(merged,\n specials=[Constants.UNK_WORD, Constants.PAD_WORD,\n Constants.BOS_WORD, Constants.EOS_WORD],\n max_size=vocab_size,\n min_freq=min_frequency) \n\ndef build_vocab(train_dataset_files, fields, share_vocab,\n src_vocab_size, src_words_min_frequency,\n tgt_vocab_size, tgt_words_min_frequency):\n counter = {}\n\n for k in fields:\n counter[k] = Counter()\n\n # Load vocabulary\n for _, path in enumerate(train_dataset_files):\n dataset = torch.load(path)\n logger.info(\" * reloading %s.\" % path)\n for ex in dataset.examples:\n for k in fields:\n val = getattr(ex, k, None)\n if not fields[k].sequential:\n continue\n counter[k].update(val)\n\n dataset.examples = None\n gc.collect()\n del dataset.examples\n gc.collect()\n del dataset\n gc.collect()\n\n build_field_vocab(fields[\"tgt\"], counter[\"tgt\"],\n max_size=tgt_vocab_size,\n min_freq=tgt_words_min_frequency)\n logger.info(\" * tgt vocab size: %d.\" % 
len(fields[\"tgt\"].vocab))\n\n\n build_field_vocab(fields[\"src\"], counter[\"src\"],\n max_size=src_vocab_size,\n min_freq=src_words_min_frequency)\n logger.info(\" * src vocab size: %d.\" % len(fields[\"src\"].vocab))\n\n # Merge the input and output vocabularies.\n if share_vocab:\n # `tgt_vocab_size` is ignored when sharing vocabularies\n logger.info(\" * merging src and tgt vocab...\")\n merged_vocab = merge_vocabs(\n [fields[\"src\"].vocab, fields[\"tgt\"].vocab],\n vocab_size=src_vocab_size,\n min_frequency=src_words_min_frequency)\n fields[\"src\"].vocab = merged_vocab\n fields[\"tgt\"].vocab = merged_vocab\n logger.info(\" * src vocab size: %d.\" % len(fields[\"src\"].vocab))\n logger.info(\" * tgt vocab size: %d.\" % len(fields[\"tgt\"].vocab))\n\n return fields\n\ndef parse_args():\n parser = configargparse.ArgumentParser(\n description='preprocess.py',\n config_file_parser_class=configargparse.YAMLConfigFileParser,\n formatter_class=configargparse.ArgumentDefaultsHelpFormatter)\n\n opts.config_opts(parser)\n opts.preprocess_opts(parser)\n\n opt = parser.parse_args()\n torch.manual_seed(opt.seed)\n\n return opt\n\ndef build_save_in_shards_using_shards_size(src_corpus, tgt_corpus, fields,\n corpus_type, opt):\n src_data = []\n tgt_data = []\n with open(src_corpus, \"r\") as src_file:\n with open(tgt_corpus, \"r\") as tgt_file:\n for s, t in zip(src_file, tgt_file):\n src_data.append(s)\n tgt_data.append(t)\n if len(src_data) != len(tgt_data):\n raise AssertionError(\"Source and Target should \\\n have the same length\")\n\n num_shards = int(len(src_data) / opt.shard_size)\n for x in range(num_shards):\n logger.info(\"Splitting shard %d.\" % x)\n f = codecs.open(src_corpus + \".{0}.txt\".format(x), \"w\",\n encoding=\"utf-8\")\n f.writelines(\n src_data[x * opt.shard_size: (x + 1) * opt.shard_size])\n f.close()\n f = codecs.open(tgt_corpus + \".{0}.txt\".format(x), \"w\",\n encoding=\"utf-8\")\n f.writelines(\n tgt_data[x * opt.shard_size: (x + 1) * opt.shard_size])\n f.close()\n num_written = num_shards * opt.shard_size\n if len(src_data) > num_written:\n logger.info(\"Splitting shard %d.\" % num_shards)\n f = codecs.open(src_corpus + \".{0}.txt\".format(num_shards),\n 'w', encoding=\"utf-8\")\n f.writelines(\n src_data[num_shards * opt.shard_size:])\n f.close()\n f = codecs.open(tgt_corpus + \".{0}.txt\".format(num_shards),\n 'w', encoding=\"utf-8\")\n f.writelines(\n tgt_data[num_shards * opt.shard_size:])\n f.close()\n src_list = sorted(glob.glob(src_corpus + '.*.txt'))\n tgt_list = sorted(glob.glob(tgt_corpus + '.*.txt'))\n\n ret_list = []\n\n for index, src in enumerate(src_list):\n logger.info(\"Building shard %d.\" % index)\n src_iter = make_text_iterator_from_file(src)\n tgt_iter = make_text_iterator_from_file(tgt_list[index])\n dataset = build_dataset(\n fields,\n src_iter,\n tgt_iter,\n src_seq_length=opt.src_seq_length,\n tgt_seq_length=opt.tgt_seq_length,\n src_seq_length_trunc=opt.src_seq_length_trunc,\n tgt_seq_length_trunc=opt.tgt_seq_length_trunc\n )\n\n pt_file = \"{:s}_{:s}.{:d}.pt\".format(\n opt.save_data, corpus_type, index)\n\n # We save fields in vocab.pt separately, so make it empty.\n dataset.fields = []\n\n logger.info(\" * saving %sth %s data shard to %s.\"\n % (index, corpus_type, pt_file))\n torch.save(dataset, pt_file)\n\n ret_list.append(pt_file)\n os.remove(src)\n os.remove(tgt_list[index])\n del dataset.examples\n gc.collect()\n del dataset\n gc.collect()\n\n return ret_list\n\ndef store_vocab_to_file(vocab, filename):\n with open(filename, 
\"w\") as f:\n for i, token in enumerate(vocab.itos):\n f.write(str(i)+ ' ' + token + '\\n')\n f.close()\n\ndef build_save_vocab(train_dataset, fields, opt):\n \"\"\" Building and saving the vocab \"\"\"\n fields = build_vocab(train_dataset, fields,\n opt.share_vocab,\n opt.src_vocab_size,\n opt.src_words_min_frequency,\n opt.tgt_vocab_size,\n opt.tgt_words_min_frequency)\n\n # Can't save fields, so remove/reconstruct at training time.\n vocab_file = opt.save_data + '_vocab.pt'\n torch.save(save_fields_to_vocab(fields), vocab_file)\n store_vocab_to_file(fields['src'].vocab, opt.save_data + '_src_vocab')\n store_vocab_to_file(fields['tgt'].vocab, opt.save_data + '_tgt_vocab')\n \ndef build_save_dataset(corpus_type, fields, opt):\n \"\"\" Building and saving the dataset \"\"\"\n assert corpus_type in ['train', 'valid']\n\n if corpus_type == 'train':\n src_corpus = opt.train_src\n tgt_corpus = opt.train_tgt\n else:\n src_corpus = opt.valid_src\n tgt_corpus = opt.valid_tgt\n\n if (opt.shard_size > 0):\n return build_save_in_shards_using_shards_size(src_corpus,\n tgt_corpus,\n fields,\n corpus_type,\n opt)\n\n # We only build a monolithic dataset.\n # But since the interfaces are uniform, it would not be hard\n # to do this should users need this feature.\n src_iter = make_text_iterator_from_file(src_corpus)\n tgt_iter = make_text_iterator_from_file(tgt_corpus)\n dataset = build_dataset(\n fields,\n src_iter,\n tgt_iter,\n src_seq_length=opt.src_seq_length,\n tgt_seq_length=opt.tgt_seq_length,\n src_seq_length_trunc=opt.src_seq_length_trunc,\n tgt_seq_length_trunc=opt.tgt_seq_length_trunc)\n\n # We save fields in vocab.pt separately, so make it empty.\n dataset.fields = []\n\n pt_file = \"{:s}_{:s}.pt\".format(opt.save_data, corpus_type)\n logger.info(\" * saving %s dataset to %s.\" % (corpus_type, pt_file))\n torch.save(dataset, pt_file)\n\n return [pt_file]\n\ndef main():\n opt = parse_args()\n if (opt.shuffle > 0):\n raise AssertionError(\"-shuffle is not implemented, please make sure \\\n you shuffle your data before pre-processing.\")\n init_logger(opt.log_file)\n logger.info(\"Input args: %r\", opt)\n logger.info(\"Extracting features...\")\n\n logger.info(\"Building `Fields` object...\")\n fields = get_fields()\n\n logger.info(\"Building & saving training data...\")\n train_dataset_files = build_save_dataset('train', fields, opt)\n\n logger.info(\"Building & saving validation data...\")\n build_save_dataset('valid', fields, opt)\n\n logger.info(\"Building & saving vocabulary...\")\n\n build_save_vocab(train_dataset_files, fields, opt)\n\nif __name__ == \"__main__\":\n main()\n \n","sub_path":"opennmt-baseline/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":9046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"69707376","text":"import json\nimport socket\nimport struct\nfrom custom_types import QuestionType\n\n\nHOST = 'localhost'\nPORT = 12000\n\nclass ServerHandler:\n\n def __init__(self) -> None:\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n def __enter__(self):\n self._socket.connect((HOST, PORT))\n\n return self\n\n def __exit__(self, exception_type, exception_value, traceback) -> None:\n self._socket.close()\n\n def _receive_json(self) -> bytes:\n length_buff = self._recvall(4)\n\n if length_buff is None:\n return None\n length, = struct.unpack('!I', length_buff)\n return self._recvall(length)\n\n def 
_recvall(self, count: int) -> bytes:\n buff = b''\n\n while count:\n new_buff = self._socket.recv(count)\n\n if not new_buff:\n return None\n buff += new_buff\n count -= len(new_buff)\n return buff\n\n def _send_json(self, data: bytes) -> None:\n length = len(data)\n\n self._socket.sendall(struct.pack('!I', length))\n self._socket.sendall(data)\n\n def receive(self) -> QuestionType:\n data = self._receive_json()\n\n if data is None:\n return None\n return json.loads(data)\n\n def send(self, response: int) -> None:\n data = json.dumps(response).encode('utf-8')\n\n self._send_json(data)\n","sub_path":"bui_src/server_handler.py","file_name":"server_handler.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"639877430","text":"import random\nfrom advent import *\nfrom advent import Game, Location, Connection, Object, Animal, Robot, Pet, Player, Lockable, Container\nfrom advent import NORTH, SOUTH, EAST, WEST, UP, DOWN, RIGHT, LEFT, IN, OUT, FORWARD, BACK, NORTH_WEST, NORTH_EAST, SOUTH_WEST, SOUTH_EAST, NOT_DIRECTION\n\ngame = Game(\"test\")\n\n\n# LOCATIONS\nroom = game.new_location(\n \"Room\",\n\"\"\"You are in a room. There is a bookshelf in front of you and a door to your left.\"\"\"\n)\n\ntheater = game.new_location(\n \"Theater\",\n\"\"\"a home movie theater\"\"\"\n)\n\nbookshelf1 = game.new_location(\n \"Bookshelf\",\n\"\"\"a dusty bookshelf. a book is missing from the shelf. a key is on the shelf.\"\"\"\n)\n\nsaferoom = game.new_location(\n \"Safe\",\n\"\"\"a small room with a safe\"\"\"\n)\n\n\n# CONTAINERS\nsafe2 = saferoom.add_object(Container(\"safe\", \"An old safe\"))\ndvdplayer = theater.add_object(Container(\"dvd player\", \"a dvd player\"))\n\n\n# CONNECTIONS\ngame.new_connection(\"Bookshelf\", room, bookshelf1, [FORWARD], [BACK])\ngame.new_connection(\"Theater\", room, theater, [LEFT], [RIGHT])\nbookshelf2 = game.new_connection(\"Safe\", bookshelf1, saferoom, [FORWARD], [BACK])\n\n\n# OBJECTS\nbook = room.new_object(\"book\", \"an old book\")\nkey = bookshelf1.new_object(\"key\", \"an old key\")\nnacho = safe2.new_object(\"nacho libre\", \"The last copy of nacho libre on planet earth\")\n\n\n# USE OBJECT\nbookshelf2.set_flag('locked')\ndef pick_lock(game, thing):\n game.output('the shelf moves revealing a hidden room')\n thing.unset_flag('locked')\nbookshelf2.add_phrase('replace book', pick_lock, [book])\n\ndvdplayer.set_flag('locked')\ndef play_movie(game, thing):\n game.output('the lights dim and nacho libre begins to play on the big screen. 
NACHOOOOOOOOOOOOOO!')\n thing.unset_flag('locked')\ndvdplayer.add_phrase('play movie', play_movie, [book])\n\n\n# REQUIREMENTS\nsafe2.make_requirement(key)\ndvdplayer.make_requirement(nacho)\n\nplayer = game.new_player(room)\n\ntest_script = Script(\"test\",\n\"\"\"\n> take book\n> go forward\n> take key\n> replace book\n> go forward\n> open safe\n> take nacho libre\n> go back\n> go back\n> go left\n> play movie\n> end\n\n\"\"\")\n\nplayer.add_script(test_script)\n\ngame.run()","sub_path":"adventure/gametest.py","file_name":"gametest.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"69440838","text":"\"\"\"\nCls and bbox head for FPN feature maps.\n\"\"\"\n\nimport torch.nn as nn\n\n\nclass ClsBBoxHead_fc(nn.Module):\n \"\"\"Classification and bounding box regression head using fully-connected style.\n \"\"\"\n\n def __init__(self, depth, pool_size, num_classes):\n super(ClsBBoxHead_fc, self).__init__()\n self.depth = depth\n self.num_classes = num_classes\n self.fc_0 = nn.Linear(self.depth * pool_size[0] * pool_size[1], 1024)\n self.fc_1 = nn.Linear(1024, 1024)\n self.relu = nn.ReLU(inplace=True)\n self.fc_cls = nn.Linear(1024, num_classes)\n self.fc_bbox = nn.Linear(1024, num_classes * 4)\n self.log_softmax = nn.LogSoftmax(dim=1)\n\n def forward(self, x):\n \"\"\"\n \n Args:\n x: (NxS)xCxHxW, roi fixed dimensional representation after pooling like RoIAlign,\n HxW: fixed size, like 7x7.\n\n Returns:\n cls_prob: (NxS)x num_classes, probability of class.\n bbox_reg: (NxS)x num_classes x 4(dx, dy, dw, dh), defined in R-CNN paper.\n \n Notes: In above, S: number of rois per image fed to the predict heads\n \n \"\"\"\n x = x.view(x.size(0), -1)\n x = self.fc_0(x)\n x = self.relu(x)\n x = self.fc_1(x)\n x = self.relu(x)\n\n fc_out_cls = self.fc_cls(x)\n cls_prob = self.log_softmax(fc_out_cls)\n bbox_reg = self.fc_bbox(x)\n bbox_reg = bbox_reg.view(-1, self.num_classes, 4)\n\n return cls_prob, bbox_reg\n\n\nclass ClsBBoxHead_conv(nn.Module):\n \"\"\"Classification and bounding box regression head using Conv style.\n \"\"\"\n\n def __init__(self, depth, pool_size, num_classes):\n super(ClsBBoxHead_conv, self).__init__()\n self.num_classes = num_classes\n self.depth = depth\n self.conv1 = nn.Conv2d(depth, 1024, kernel_size=pool_size, stride=1)\n self.bn1 = nn.BatchNorm2d(1024)\n self.conv2 = nn.Conv2d(1024, 1024, kernel_size=1, stride=1)\n self.bn2 = nn.BatchNorm2d(1024)\n self.relu = nn.ReLU(inplace=True)\n self.fc_cls = nn.Linear(1024, num_classes)\n self.fc_bbox = nn.Linear(1024, num_classes * 4)\n self.log_softmax = nn.LogSoftmax(dim=1)\n self._init_parameters()\n\n def _init_parameters(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal(m.weight)\n\n def forward(self, x):\n \"\"\"\n\n Args:\n x: NxSxHxW, rois fixed dimensional representation after pooling like RoIAlign,\n HxW: fixed size, like 14x14.\n\n Returns:\n cls_prob: NxSxNum_classes, probability of class.\n bbox_reg: NxSxNum_classes*4(dx, dy, dw, dh), defined in R-CNN paper.\n \n Notes: In above, S: number of rois per image fed to the predict heads\n\n \"\"\"\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.bn2(x)\n x = self.relu(x)\n\n x = x.view(-1, 1024)\n fc_out_cls = self.fc_cls(x)\n cls_prob = self.log_softmax(fc_out_cls)\n bbox_reg = self.fc_bbox(x)\n bbox_reg = bbox_reg.view(-1, self.num_classes, 4)\n\n return cls_prob, 
bbox_reg\n","sub_path":"heads/cls_bbox_fpn.py","file_name":"cls_bbox_fpn.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"559024522","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\nimport pymysql\ndb = pymysql.connect(host='127.0.0.1',user='root',passwd='',db='mysql',charset='utf8')\n\ncur = db.cursor()\ncur.execute(\"USE scraping\")\n\n\n\ndef store(name, url):\n # let the driver quote/escape the parameters instead of wrapping %s in quotes\n cur.execute(\"INSERT INTO herolist (name, url) VALUES (%s, %s)\", (name, url))\n cur.connection.commit()\n\npath = 'webpage/hero.html'\nhtmlfile = open(path, 'r', encoding='utf-8')\nhtmlhandle = htmlfile.read()\nbsObj = BeautifulSoup(htmlhandle,'html.parser')\nfor herolist in bsObj.find(\"ul\",{\"class\":\"herolist clearfix\"}).li.next_siblings:\n name = herolist.find(\"img\").attrs['alt']\n hero = herolist.find(\"a\").attrs['href']\n url = ('http://pvp.qq.com/web201605/{0}'.format(hero))\n store(name,url)\ncur.close()\ndb.close()\n","sub_path":"Web-Scraping-First-Try/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"49671861","text":"# https://github.com/python-telegram-bot/python-telegram-bot\n# sudo pip install python-telegram-bot\nimport telegram\n\nclass favisbot(object):\n '''favisbot communicates with telegram'''\n\n def __init__(self, *args):\n # if not args:\n # print ('no arguments')\n # else:\n # for i,arg in enumerate(args):\n # if type(arg)==str:\n # print (arg)\n self.favisbot = telegram.Bot(token='201142916:AAEBvuAYEXCKFe6Ql_DdkBk6V3Y3G6CdIZU')\n self.chat_id = 185388733\n\n def setBotKey(self, token, chat_id):\n self.favisbot = telegram.Bot(token=token)\n self.chat_id = chat_id\n\n def whisper(self, msgtype, msg):\n if not msgtype:\n print (\"msgtype must not be null or empty; use one of [plain|html|imglink|img]\")\n return\n\n bot = self.favisbot\n if msgtype == 'plain':\n bot.sendMessage(chat_id=self.chat_id, text=msg)\n elif msgtype == 'html':\n bot.sendMessage(chat_id=self.chat_id, text=msg, parse_mode=telegram.ParseMode.HTML)\n elif msgtype == 'imglink':\n bot.sendPhoto(chat_id=self.chat_id, photo=msg)\n elif msgtype == 'img':\n bot.sendPhoto(chat_id=self.chat_id, photo=open(msg, 'rb'))\n else:\n print (\"msgtype must be one of [plain|html|imglink|img]\")\n\n def getBot(self):\n return self.favisbot\n\n \nif __name__ == '__main__':\n bot = favisbot()\n bot.whisper('plain','testing')\n \n \n","sub_path":"msgbot/favisbot.py","file_name":"favisbot.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"213351041","text":"from django.urls import path\nfrom django.contrib.auth.views import LogoutView\nfrom . 
import views\nfrom .views import (\n BaseView, \n ProductDetailView, \n CategoryDetailView, \n CartView, \n AddToCartView, \n DeleteFromCartView,\n ChangeQTYView,\n CheckoutView,\n MakeOrderView,\n PayedOnlineOrderView,\n LoginView,\n RegistrationView,\n ProfileView,\n PizzaAddView,\n BeerAddView,\n ProductUpgradeView,\n SearchResultsView,\n)\n\nurlpatterns = [\n path(\"\", BaseView.as_view(), name='base'),\n path('products/<str:ct_model>/<str:slug>/', ProductDetailView.as_view(), name='product_detail'),\n path('category/<str:slug>/', CategoryDetailView.as_view(), name='category_detail'),\n path('cart/', CartView.as_view(), name='cart'),\n path('add-to-cart/<str:ct_model>/<str:slug>/',AddToCartView.as_view(), name='add_to_cart'),\n path('delete-from-cart/<str:ct_model>/<str:slug>/',DeleteFromCartView.as_view(), name='delete_from_cart'),\n path('change-qty/<str:ct_model>/<str:slug>/',ChangeQTYView.as_view(), name='change_qty'),\n path('checkout/', CheckoutView.as_view(), name='checkout'),\n path('makeorder/', MakeOrderView.as_view(), name='make_order'),\n path('payed-online-order/', PayedOnlineOrderView.as_view(), name='payed_online_order'),\n path('login/', LoginView.as_view(), name='login'),\n path('logout/', LogoutView.as_view(next_page=\"/\"), name='logout'),\n path('registration/', RegistrationView.as_view(), name='registration'),\n path('profile/', ProfileView.as_view(), name='profile'),\n path('pizza_add/', PizzaAddView.as_view(), name='pizza_add'),\n path('beer_add/', BeerAddView.as_view(), name='beer_add'),\n path('upgrade/<str:ct_model>/<str:slug>/',ProductUpgradeView.as_view(), name='upgrade'),\n path('search/', SearchResultsView.as_view(), name='search_results'),\n]\n\n","sub_path":"pizza_shop/mainapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"341040994","text":"#!/usr/bin/env python\nimport logging\nimport string\nimport os\nimport time\nimport argh\nimport pandas as pd\nimport mylib\nimport mylib.tools\nimport mylib.data\nfrom numpy.random import randn, permutation\n\n_s_orig = dict(str=['a', 'f', 'g'], float=['b', 'c', 'd', 'e'])\n_s = mylib.tools.dict_of_lists_to_dict(_s_orig)\n_n = len(_s)\n\ndef gen_data(m=10, n=_n):\n df = pd.DataFrame(randn(m, n))\n df.columns = list(string.ascii_lowercase[:n])\n for k in _s_orig['str']:\n df[k] = [mylib.data.id_generator(m) for x in range(m)]\n df = df.set_index('a')\n return df\n\ndef periodically_generate_new_data():\n return mylib.tools.run_in_background(_periodically_generate_new_data)\n\ndef _periodically_generate_new_data(period=5, nmax=100, basedir='./data', scramble_columns=True):\n if not os.path.exists(basedir):\n os.makedirs(basedir)\n dt = 1\n if nmax is None:\n dt = 0\n t = 0\n file_counter = 0\n # run forever when nmax is None (comparing t < None would raise a TypeError)\n while nmax is None or t < nmax:\n df = gen_data()\n if scramble_columns:\n df = df.iloc[:,permutation(df.shape[1])]\n filename = os.path.join(basedir, '{}.csv.gz'.format(file_counter))\n file_counter += 1\n t += dt\n logging.info('WRITE: {} {}'.format(t, filename))\n df.to_csv(filename, compression='gzip')\n time.sleep(period)\n\nif __name__ == '__main__':\n argh.dispatch_command(_periodically_generate_new_data)\n","sub_path":"globber/generate_data.py","file_name":"generate_data.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"383621515","text":"#%%\n#! Base Configuration Class\n#! Don't use this class directly. \n#! 
Instead, sub-class it and override the configurations you need to change.\n\n\nclass Preprocessing(object):\n def __init__(self):\n self.MIN_SHARED_COUNTS = 20\n\n # (int) # of highly variable genes selected for pre-processing, default 2000\n # consider decreasing to 1500 when # of cells > 10k\n self.N_TOP_GENES = 2000\n\n self.N_PCS = 30\n self.N_NEIGHBORS = 30\n\n # (bool) use raw un/spliced counts or first order moments\n self.USE_RAW = False\n\n # (bool) rescaled Mu/Ms as input based on variance, default True \n self.RESCALE_DATA = True\n\nclass Regularization(object):\n def __init__(self):\n # (bool) regularization on loss function to push peak time away from 0.5\n # mainly used in unified time mode for linear phase portraits\n self.REG_LOSS = True\n # (float) globally adjust the magnitude of the penalty, recommend < 0.1\n self.REG_TIMES = 0.075\n # (float) scaling parameter of the regularizer\n self.REG_SCALE = 1\n\n # (list of tuples) [(gene1, trend1), (gene2, trend2), (gene3, trend3), ...], \n # a list of genes whose trend can be one of {increase, decrease}, default None\n self.GENE_PRIOR = None\n self.GENE_PRIOR_SCALE = 5000\n\nclass Optimizer(object):\n def __init__(self):\n # (float) learning rate of the main optimizer\n self.LEARNING_RATE = 1e-2\n # (int) maximum number of iterations of the main optimizer\n self.MAX_ITER = 12000\n\nclass FittingOption(object):\n def __init__(self):\n # Fitting options under Gaussian model \n # '1' = Unified-time mode \n # '2' = Independent mode\n self.FIT_OPTION = '1'\n\n # (str, experimental) methods to aggregate the time matrix, default 'SVD'\n # Max SVD Raw\n self.DENSITY = 'SVD'\n # (str) whether to reorder cells based on relative positions for time assignment\n # Soft_Reorder (default) Hard (for Independent mode)\n self.REORDER_CELL = 'Soft_Reorder'\n # (bool) aggregate gene-specific time to cell time during fitting\n # controlled by self.FIT_OPTION\n self.AGGREGATE_T = True\n\n # (bool, experimental) whether to clip negative predictions to 0, default False\n self.ASSIGN_POS_U = False\n\n # (bool, experimental) cell time restricted to (0, 1) if False, default False\n self.RESCALE_TIME = False\n\nclass VelocityGenes(object):\n def __init__(self):\n # (bool) linear regression $R^2$ on extreme quantile (default) or full data (adjusted)\n # valid when self.VGENES = 'basic'\n self.R2_ADJUST = True\n\n # (str) selection criteria for velocity genes used in RNA velocity construction, default basic\n # 1. raws, all highly variable genes specified by self.N_TOP_GENES will be used\n # 2. offset, linear regression $R^2$ and coefficient with offset, will override self.R2_ADJUST\n # 3. basic, linear regression $R^2$ and coefficient without offset\n # 4. single gene name, fit this designated gene alone, for model validation purpose only\n # 5. [list of gene names], manually provide a list of genes as velocity genes in string, might improve performance, see scNT\n self.VGENES = 'basic'\n\n # (float) threshold of R2 at later stage of the optimization process\n # to capture the dynamics of more genes beside initially selected velocity genes\n # Note: self.AGENES_R2 = 1 will switch to original mode with no amplification\n self.AGENES_R2 = 1\n self.AGENES_THRES = 0.61\n\n # (bool, experimental) exclude cells that have 0 expression in either un/spliced when contributing to loss function\n self.FILTER_CELLS = False\n\nclass CellInitialization(object):\n def __init__(self):\n # (str) criteria for cell latent time initialization, default None\n # 1. 
None, initialized based on the exact order of input expression matrix\n # 2. gcount, str, initialized based on gene counts (https://www.science.org/doi/abs/10.1126/science.aax0249)\n # 3. cluster name, str, use diffusion map based time as initialization\n # 4. [(gene1, trend1), (gene2, trend2), (gene3, trend3), ...], list of tuples, \n # a list of genes whose trend can be one of {increase, decrease} \n self.IROOT = None\n\n # (int) number of random initializations of time points, default 1\n # in rare cases, velocity field generated might be reversed, possibly because stably and monotonically changed genes\n # changing this parameter to 2 might do the trick\n self.NUM_REP = 1\n # when self.NUM_REP = 2, the following parameter will determine how the second time will be initialized \n # re_pre, reverse the inferred cell time of the first run\n # re_init, reverse the initialization time of the first run\n self.NUM_REP_TIME = 're_pre'\n\nclass Configuration():\n def __init__(self):\n Preprocessing.__init__(self)\n Regularization.__init__(self)\n Optimizer.__init__(self)\n FittingOption.__init__(self)\n VelocityGenes.__init__(self)\n CellInitialization.__init__(self)\n\n # (int) specify the GPU card for acceleration, default 0\n # -1 will switch to CPU mode\n self.GPU = 0\n\n # Gaussian Mixture\n self.BASE_FUNCTION = 'Gaussian'\n\n # Deterministic Curve Linear\n self.GENERAL = 'Curve'\n\n # (str) embedding format of adata, e.g. pca, tsne, umap, \n # if None (default), algorithm will choose one automatically\n self.BASIS = None\n\n # (int, experimental) window size for sliding smoothing of distribution with highest probability\n # useful when self.DENSITY == 'Max'\n # self.WIN_SIZE = 50","sub_path":"unitvelo/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"82958177","text":"from sklearn import datasets\nimport numpy as np\n\n\ndef dist(x, y):\n return np.sqrt(np.sum((x - y) ** 2))\n\n\ndigits = datasets.load_digits()\n\nx_features = digits.data[0:1000]\nx_labels = digits.target[0:1000]\n\nx_test = digits.data[555]\n\n\nnum = len(x_features)\ndistances = np.zeros(num)\n\nfor i in range(num):\n distances[i] = dist(x_features[i], x_test)\n\nmin_index = np.argmin(distances)\n\nprint(x_labels[min_index])","sub_path":"image_class.py","file_name":"image_class.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"203640930","text":"# BEGIN_COPYRIGHT\n#\n# Copyright 2009-2016 CRS4.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy\n# of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n# END_COPYRIGHT\n\n\"\"\"\nHDFS core implementation.\n\"\"\"\n\nfrom . 
import impl\n\n\ndef init(backend=impl.DEFAULT):\n if backend == impl.NATIVE:\n import pydoop.utils.jvm as jvm\n jvm.load_jvm_lib()\n try:\n # NOTE: JVM must be already instantiated\n import pydoop.native_core_hdfs\n except ImportError:\n return None # should only happen at compile time\n else:\n return pydoop.native_core_hdfs\n elif backend == impl.JPYPE_BRIDGED:\n from pydoop.hdfs.core.bridged import get_implementation_module\n return get_implementation_module()\n else:\n raise ValueError(\"%r: unsupported hdfs backend\" % (backend,))\n\n\ntry:\n from pydoop.config import HDFS_CORE_IMPL\nexcept ImportError:\n _CORE_MODULE = None # should only happen at compile time\nelse:\n _CORE_MODULE = init(backend=HDFS_CORE_IMPL)\n\n\ndef core_hdfs_fs(host, port, user):\n if _CORE_MODULE is None:\n raise RuntimeError(\n 'module not initialized, check that Pydoop is correctly installed'\n )\n return _CORE_MODULE.CoreHdfsFs(host, port, user)\n","sub_path":"pydoop/hdfs/core/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"360949910","text":"import cv2\nimport imageio\nfrom os.path import splitext\nfrom threading import Thread\nfrom time import time\n\ngif_animations = {}\ngif_sizes = {}\n\n\nclass ImageDisplay(object):\n def __init__(self, animations, default_animation, fps=15):\n if not animations:\n raise AttributeError('No animations specified')\n if default_animation not in animations:\n raise AttributeError(\"Default animation '{}' doesn't exist in animations\".format(default_animation))\n self.animations = animations\n self._window_name = 'faceDisplay'\n self._actual_animation = None\n self._actual_animation_name = None\n self._gif_frame_number = 0\n self._display_thread = None\n self._display_function = None\n self._gif_to_frames()\n self.change_animation(default_animation)\n self._running = False\n self._frame_frequency = 1.0 / fps\n self._last_display_time = 1\n self.start()\n\n @property\n def fps(self):\n return 1.0/self._frame_frequency\n\n @fps.setter\n def fps(self, value):\n self._frame_frequency = 1.0/value\n\n def _gif_to_frames(self):\n for animation_name in self.animations:\n animation_path = self.animations[animation_name]\n filename, file_extension = splitext(animation_path)\n if file_extension == '.gif':\n gif_frames = imageio.mimread(animation_path)\n gif_size = len(gif_frames)\n if gif_size > 0:\n bgr_frames = [cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) for frame in gif_frames]\n gif_animations[animation_name] = bgr_frames\n gif_sizes[animation_name] = gif_size\n else:\n raise ImportError(\"Can't open {} gif\".format(self.animations[animation_name]))\n\n def start(self):\n if self._display_thread is None:\n self._window_name = '{}1'.format(self._window_name)\n self._display_thread = Thread(target=self._display_loop)\n self._display_thread.daemon = True\n self._running = True\n self._display_thread.start()\n\n def _load_animation(self, animation_name):\n animation_path = self.animations[animation_name]\n filename, file_extension = splitext(animation_path)\n if file_extension == '.gif':\n if animation_name in gif_animations:\n return gif_animations[animation_name], self._display_gif\n else:\n video = cv2.VideoCapture(self.animations[animation_name])\n if video.isOpened():\n return video, self._display_video\n else:\n raise ImportError(\"Can't open {} video\".format(self.animations[animation_name]))\n\n def change_animation(self, animation_name):\n if 
self._actual_animation_name != animation_name:\n if animation_name in self.animations:\n self._actual_animation, self._display_function = self._load_animation(animation_name)\n self._gif_frame_number = 0\n self._actual_animation_name = animation_name\n\n def _display_video(self):\n if self._actual_animation.isOpened():\n ret, frame = self._actual_animation.read()\n if ret:\n cv2.imshow(self._window_name, frame)\n else:\n self._actual_animation.set(cv2.CAP_PROP_POS_FRAMES, 1)\n cv2.waitKey(1)\n\n def _display_gif(self):\n if self._gif_frame_number < gif_sizes[self._actual_animation_name]:\n cv2.imshow(self._window_name, self._actual_animation[self._gif_frame_number])\n cv2.waitKey(1)\n self._gif_frame_number += 1\n else:\n self._gif_frame_number = 0\n\n def _display_loop(self):\n cv2.namedWindow(self._window_name, cv2.WND_PROP_FULLSCREEN)\n cv2.setWindowProperty(self._window_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n cv2.waitKey(1)\n while self._running:\n if time() - self._last_display_time > self._frame_frequency:\n self._display_function()\n self._last_display_time = time()\n\n def close(self):\n self._running = False\n self._display_thread.join()\n self._display_thread = None\n print('done')\n cv2.destroyWindow(self._window_name)\n","sub_path":"Poppy/pypot/FilesToCopy/pypot-3.0.4-py2.7.egg/pypot/extras/image_display.py","file_name":"image_display.py","file_ext":"py","file_size_in_byte":4365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"121496925","text":"class Solution(object):\n def findStrobogrammatic(self, n):\n \"\"\"\n :type n: int\n :rtype: List[str]\n \"\"\"\n \n res = self.helper(n, [])\n return [a for a in res if a[0] !='0'] if n!=1 else res\n \n def helper(self, n, res):\n if n==0: return ['']\n if n == 1: return ['0','1','8']\n if n==2: return ['00','11','69','88','96']\n res = []\n for ele in self.helper(2, res):\n cur = []\n for char in (self.helper(n-2, res)):\n cur.append(ele[0] + char + ele[1])\n res += cur\n return res\n \n # if n==0: return ['']\n # if n == 1: return ['0','1','8']\n # if n==2: return ['00','11','69','88','96']\n \n # return self.find(n, n)\n \n # def find(self, x, y):\n \n # if x==0: return [\"\"]\n # if x==1: return [\"0\",\"1\",\"8\"]\n # base = self.find(x-2, y)\n # ans = []\n # for s in base:\n # if x!=y:\n # ans.append(\"0\"+ s +\"0\")\n # ans.append([\"1\"+ s +\"1\", \"6\"+ s +\"9\", \"8\"+ s +\"8\", \"9\"+ s +\"6\"])\n","sub_path":"python/247. 
Strobogrammatic Number II.py","file_name":"247. Strobogrammatic Number II.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"87343300","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 4 15:38:38 2013\n\n@author: psilentp\n\"\"\"\n\ndef histeq(im,nbr_bins=256):\n \"\"\"histogram equalization of a grayscale image\"\"\"\n import numpy as np\n imhist,bins = np.histogram(im.flatten(),nbr_bins,density = True)\n cdf = imhist.cumsum()\n cdf = 255 * cdf/cdf[-1]\n im2 = np.interp(im.flatten(),bins[:-1],cdf)\n return im2.reshape(im.shape),cdf\n ","sub_path":"Behavioral_analysis/Obsolete/imtools.py","file_name":"imtools.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"410568780","text":"# encoding= utf-8\n# Author: HHB\n# Date: 2022/11/10 11:12\n\n\nimport time\nimport re\nimport requests\nimport pymysql\nfrom lxml import etree\n\n'''\n\n\nSputnik (Russian Satellite News Agency)\n\nhttps://sputniknews.cn/russia/\n\n\n\n\n'''\n\nclass Russian(object):\n\n def __init__(self):\n\n self.url = 'https://sputniknews.cn/russia/'\n self.headers = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36',\n }\n\n self.connect = pymysql.connect(host='127.0.0.1', port=3306, db='policeproject',\n user='root', passwd='123456', charset='utf8', )\n self.cursor = self.connect.cursor()\n\n def Time_(self, times):\n timeArray = time.localtime(int(times))\n publish_date = time.strftime(\"%Y-%m-%d %H:%M:%S\", timeArray)\n return publish_date\n\n def parse_url(self):\n\n response = requests.get(url=self.url, headers=self.headers).text\n tree = etree.HTML(response)\n url_list = tree.xpath('//div[@class=\"list__content\"]/a/@href')\n for detail_url in url_list:\n self.parse_detail(detail_url)\n\n last_url = tree.xpath('//div[contains(@class,\"list__item\")][last()]/div/a/@href')[0]\n last_url_id = re.findall(\"(\\d+).html\", last_url)[0]\n more_one_url = 'https://sputniknews.cn/services/russia/more.html?id=' + last_url_id\n print('more_one_url:', more_one_url)\n more_one_response = requests.get(url=more_one_url, headers=self.headers).text\n more_one_html = etree.HTML(more_one_response)\n more_one_detail_url = more_one_html.xpath('//div[@class=\"list__content\"]/a/@href')\n for detail_url in more_one_detail_url:\n self.parse_detail(detail_url)\n\n last_url = more_one_html.xpath('//div[contains(@class,\"list__item\")][last()]/div/a/@href')[0]\n last_url_id = re.findall(\"(\\d+).html\", last_url)[0]\n more_two_url = 'https://sputniknews.cn/services/russia/more.html?id=' + last_url_id\n print('more_two_url:', more_two_url)\n more_two_response = requests.get(url=more_two_url, headers=self.headers).text\n more_two_html = etree.HTML(more_two_response)\n more_two_detail_url = more_two_html.xpath('//div[@class=\"list__content\"]/a/@href')\n for detail_url in more_two_detail_url:\n self.parse_detail(detail_url)\n\n\n def parse_detail(self, detail_url):\n detail_url = 'https://sputniknews.cn' + detail_url\n print('detail url:', detail_url)\n response = requests.get(url=detail_url).text\n tree = etree.HTML(response)\n title = tree.xpath('//h1/text()')[0]\n print('title:', title)\n content_list = tree.xpath('//div[@class=\"article__body\"]/div/div/text()')\n content = ''.join(content_list).replace('\\n', '').replace('\\r', '').replace(' ', '').strip()\n try:\n img_url = []\n img_url_list = 
tree.xpath('//div[@class=\"photoview__open\"]/img/@src')\n for img in img_url_list:\n img_url.append(img)\n if len(img_url) < 1:\n img_url = ''\n else:\n img_url = str(img_url)\n except:\n img_url = ''\n print('images:', img_url)\n\n times = tree.xpath('//div[contains(@class,\"article__info\")]/a/@data-unixtime')[0]\n publish_time = self.Time_(times)\n print('publish time:', publish_time)\n\n try:\n view_num = int(\n tree.xpath('//a[@class=\"b-counters-icon b-counters-icon_comments js-anchor\"]/text()')\n [0].replace('\\n', '').strip())\n except:\n view_num = 0\n print('views:', view_num)\n try:\n like_num = int(\n tree.xpath('//span[@class=\"b-counters-icon b-counters-icon_like\"]/text()')[0].replace('\\n', '').strip())\n except:\n like_num = 0\n print('likes:', like_num)\n try:\n point_on = int(\n tree.xpath('//span[@class=\"b-counters-icon b-counters-icon_dislike\"]/text()')[0].replace('\\n',\n '').strip())\n except:\n point_on = 0\n print('dislikes:', point_on)\n\n print('content:', content)\n\n\n sql = \"insert into news_data(`title`,\" \\\n \"`author`,\" \\\n \"`content`,\" \\\n \"`url`,\" \\\n \"`source`,\" \\\n \"`publish_time`) value (%s,%s,%s,%s,%s,%s)\"\n\n # the article page exposes no author field, so an empty string is stored\n self.cursor.execute(sql, (\n title, '', content,\n detail_url, '俄罗斯卫星通讯社', publish_time))\n self.connect.commit()\n\nif __name__ == '__main__':\n russian = Russian()\n\n russian.parse_url()\n","sub_path":"PoliceProject/RussianSatelliteNewsAgency.py","file_name":"RussianSatelliteNewsAgency.py","file_ext":"py","file_size_in_byte":4908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"74943699","text":"'''\r\nEllie (github_repo/lib/general.py)\r\n'''\r\n\r\nimport time\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.common.exceptions import NoSuchElementException\r\nfrom selenium.webdriver import DesiredCapabilities\r\nfrom selenium.webdriver.common.by import By\r\n\r\nimport settings\r\n\r\n\r\ndef enter_text(driver, by, locator, text):\r\n element = driver.find_element(by, locator)\r\n element.clear()\r\n element.send_keys(text)\r\n\r\n\r\ndef scroll(driver, ypixels=250):\r\n max = driver.execute_script(\"return window.scrollMaxY;\")\r\n current = driver.execute_script(\"return window.scrollY;\")\r\n\r\n if current == max:\r\n return False\r\n driver.execute_script(\"window.scrollBy(0, {0})\".format(ypixels))\r\n time.sleep(0.5)\r\n return True\r\n\r\n\r\ndef is_element_present(driver, by, locator):\r\n try:\r\n return driver.find_element(by, locator)\r\n except NoSuchElementException:\r\n return False\r\n\r\n\r\ndef is_element_visible(driver, by, locator):\r\n element = is_element_present(driver, by, locator)\r\n if element:\r\n return element.is_displayed()\r\n return element\r\n\r\n\r\ndef login(driver, username=\"admin\", password=\"Password\"):\r\n enter_text(driver, By.ID, \"txtUsername\", username)\r\n enter_text(driver, By.ID, \"txtPassword\", password)\r\n driver.find_element_by_id(\"btnLogin\").click()\r\n\r\n\r\ndef logout(driver):\r\n driver.find_element_by_id(\"welcome\").click()\r\n driver.find_element_by_link_text(\"Logout\").click()\r\n\r\n\r\ndef get_browser():\r\n if settings.EXECUTE_MODE.lower() == 'remote':\r\n driver = webdriver.Remote(command_executor='http://127.0.0.1:1234/wd/hub',\r\n desired_capabilities=getattr(DesiredCapabilities, settings.BROWSER.upper()))\r\n else:\r\n # instantiate the browser class instead of returning the class itself\r\n driver = getattr(webdriver, settings.BROWSER.title())()\r\n driver.implicitly_wait(settings.IMPLICIT_WAIT)\r\n 
driver.maximize_window()\r\n return driver\r\n","sub_path":"EllieSkobelPython/needsorting/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"605204590","text":"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass CoordConv2d(nn.Conv2d):\n\n def __init__(self, in_chan, out_chan, kernel_size=3, stride=1, padding=1, dilation=1, groups=1, bias=True):\n super(CoordConv2d, self).__init__(\n in_chan + 2, out_chan, kernel_size, stride=stride,\n padding=padding, dilation=dilation, groups=groups, bias=bias)\n\n def forward(self, x):\n batchsize, H, W = x.size(0), x.size(2), x.size(3)\n h_range = torch.linspace(-1, 1, H, device=x.device, dtype=x.dtype)\n w_range = torch.linspace(-1, 1, W, device=x.device, dtype=x.dtype)\n h_chan, w_chan = torch.meshgrid(h_range, w_range)\n h_chan = h_chan.expand([batchsize, 1, -1, -1])\n w_chan = w_chan.expand([batchsize, 1, -1, -1])\n\n feat = torch.cat([h_chan, w_chan, x], dim=1)\n return F.conv2d(feat, self.weight, self.bias,\n self.stride, self.padding, self.dilation, self.groups)\n\n\nclass DY_Conv2d(nn.Conv2d):\n\n def __init__(self, in_chan, out_chan, kernel_size=3,\n stride=1, padding=1, dilation=1, groups=1, bias=False,\n act=nn.ReLU(inplace=True), K=4,\n temperature=30, temp_anneal_steps=3000):\n super(DY_Conv2d, self).__init__(\n in_chan, out_chan * K, kernel_size, stride=stride,\n padding=padding, dilation=dilation, groups=groups, bias=bias)\n assert in_chan // 4 > 0\n self.K = K\n self.act = act\n self.se_conv1 = nn.Conv2d(in_chan, in_chan // 4, 1, 1, 0, bias=True)\n self.se_conv2 = nn.Conv2d(in_chan // 4, K, 1, 1, 0, bias=True)\n self.temperature = temperature\n self.temp_anneal_steps = temp_anneal_steps\n self.temp_interval = (temperature - 1) / temp_anneal_steps\n\n def get_atten(self, x):\n bs, _, h, w = x.size()\n atten = torch.mean(x, dim=(2, 3), keepdim=True)\n atten = self.se_conv1(atten)\n atten = self.act(atten)\n atten = self.se_conv2(atten)\n if self.training and self.temp_anneal_steps > 0:\n atten = atten / self.temperature\n self.temperature -= self.temp_interval\n self.temp_anneal_steps -= 1\n atten = atten.softmax(dim=1).view(bs, -1)\n return atten\n\n\n def forward(self, x):\n bs, _, h, w = x.size()\n atten = self.get_atten(x)\n\n out_chan, in_chan, k1, k2 = self.weight.size()\n W = self.weight.view(1, self.K, -1, in_chan, k1, k2)\n W = (W * atten.view(bs, self.K, 1, 1, 1, 1)).sum(dim=1)\n W = W.view(-1, in_chan, k1, k2)\n\n b = self.bias\n if b is not None:\n b = b.view(1, self.K, -1)\n b = (b * atten.view(bs, self.K, 1)).sum(dim=1).view(-1)\n\n x = x.view(1, -1, h, w)\n\n out = F.conv2d(x, W, b, self.stride, self.padding,\n self.dilation, self.groups * bs)\n out = out.view(bs, -1, out.size(2), out.size(3))\n return out\n\n\nif __name__ == '__main__':\n net = DY_Conv2d(32, 64, 3, 1, 1, bias=True)\n inten = torch.randn(2, 32, 224, 224)\n out = net(inten)\n print(out.size())\n","sub_path":"pytorch_loss/conv_ops.py","file_name":"conv_ops.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"70871862","text":"import argparse\nimport glob\n\nfrom PIL import Image\nimport torch\nimport torchvision.transforms as transforms\n\nfrom algo.kmeans import lloyd\nfrom model.vgg import VGGFeatures\n\n\ndef get_parameters():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--imsize\", 
type=int, default=128)\n parser.add_argument(\"--n_samples\", type=int, default=100)\n parser.add_argument(\"--save_to\", type=str, default=\"../../my-dataset\")\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n opt = get_parameters()\n opt.root = \"../tmp\"\n opt.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n model = VGGFeatures().to(opt.device)\n model.eval()\n trans = transforms.Compose(\n [\n transforms.Resize((opt.imsize, opt.imsize // 2)),\n transforms.ToTensor(),\n ]\n )\n\n feats = []\n for f in glob.glob(opt.root + \"/*\"):\n x = trans(Image.open(f))\n feats.append(model(x))\n feats = torch.cat(feats, dim=0).numpy()\n\n idx, ctr = lloyd(feats, 2, opt.device)\n\n","sub_path":"mysrc/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"457800092","text":"\ndef even_integers(n):\n for i in range(n):\n if i % 2 == 0:\n yield i\n\n\n\ndef get_multiples(n):\n\ti=1\n\twhile True:\n\t\tif i % n == 0:\n\t\t\tyield i\n\t\ti+=1\n","sub_path":"Video_modules/1_Generator_Functions_And_Expressions/junk/even_integers.py","file_name":"even_integers.py","file_ext":"py","file_size_in_byte":167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"158068352","text":"#!/usr/bin/python\n#\n# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# -*- coding: utf-8 -*-\n\n\ndef initialize_variables():\n for i in range(MSIZE):\n A.append([])\n B.append([])\n C.append([])\n for j in range(MSIZE):\n A[i].append(\"A.%d.%d\" % (i, j))\n B[i].append(\"B.%d.%d\" % (i, j))\n C[i].append(\"C.%d.%d\" % (i, j))\n\n\ndef fill_matrices():\n for c in ['A', 'B', 'C']:\n for i in range(MSIZE):\n for j in range(MSIZE):\n tmp = \"%s.%d.%d\" % (c, i, j)\n f = open(tmp, 'w')\n for _ in range(BSIZE):\n for jj in range(BSIZE):\n if c == 'C':\n f.write('0.0')\n else:\n f.write('2.0')\n if jj < BSIZE - 1:\n f.write(' ')\n f.write('\\n')\n f.close()\n\n\n# ## MAIN PROGRAM ## #\n\nif __name__ == \"__main__\":\n import sys\n from matmul_tasks import multiply\n\n args = sys.argv[1:]\n MSIZE = int(args[0])\n BSIZE = int(args[1])\n\n A = []\n B = []\n C = []\n\n initialize_variables()\n fill_matrices()\n\n for i in range(MSIZE):\n for j in range(MSIZE):\n for k in range(MSIZE):\n multiply(A[i][k], B[k][j], C[i][j], BSIZE)\n","sub_path":"tests/sources/cli/runapps/0_job_submit/src/matmul/matmul_files.py","file_name":"matmul_files.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"205873256","text":"import torch\nimport librosa\nimport platform\nimport numpy as np\n\n# torchaudio is only supported on Linux\nif platform.system() == 'Linux':\n try:\n import torchaudio\n except ImportError:\n raise ImportError(\"SpectrogramParser requires torchaudio 
package.\")\n\n\nclass Spectrogram(object):\n \"\"\"\n Create a spectrogram from an audio signal.\n\n Args: sample_rate, window_size, stride, feature_extract_by\n sample_rate (int): Sample rate of audio signal. (Default: 16000)\n window_size (int): window size (ms) (Default: 20)\n stride (int): Length of hop between STFT windows. (ms) (Default: 10)\n \"\"\"\n def __init__(self, sample_rate=16000, window_size=20, stride=10):\n self.sample_rate = sample_rate\n self.n_fft = int(sample_rate * 0.001 * window_size)\n self.hop_length = int(sample_rate * 0.001 * stride)\n\n def __call__(self, signal):\n spectrogram = torch.stft(\n torch.FloatTensor(signal),\n self.n_fft,\n hop_length=self.hop_length,\n win_length=self.n_fft,\n window=torch.hamming_window(self.n_fft),\n center=False,\n normalized=False,\n onesided=True\n )\n spectrogram = (spectrogram[:, :, 0].pow(2) + spectrogram[:, :, 1].pow(2)).pow(0.5)\n spectrogram = np.log1p(spectrogram.numpy())\n\n return spectrogram\n\n\nclass MelSpectrogram(object):\n \"\"\"\n Create MelSpectrogram for a raw audio signal. This is a composition of Spectrogram and MelScale.\n\n Args: sample_rate, n_mels, window_size, stride, feature_extract_by\n sample_rate (int): Sample rate of audio signal. (Default: 16000)\n n_mels (int): Number of mel filterbanks to retain. (Default: 80)\n window_size (int): window size (ms) (Default: 20)\n stride (int): Length of hop between STFT windows. (ms) (Default: 10)\n feature_extract_by (str): which library to use for feature extraction(default: librosa)\n \"\"\"\n def __init__(self, sample_rate=16000, n_mels=80, window_size=20, stride=10, feature_extract_by='librosa'):\n self.sample_rate = sample_rate\n self.n_mels = n_mels\n self.n_fft = int(sample_rate * 0.001 * window_size)\n self.hop_length = int(sample_rate * 0.001 * stride)\n self.feature_extract_by = feature_extract_by.lower()\n\n if self.feature_extract_by == 'torchaudio':\n self.transforms = torchaudio.transforms.MelSpectrogram(\n sample_rate=sample_rate,\n win_length=self.n_fft, # window length in samples, not ms\n hop_length=self.hop_length,\n n_fft=self.n_fft,\n n_mels=n_mels\n )\n self.amplitude_to_db = torchaudio.transforms.AmplitudeToDB()\n\n def __call__(self, signal):\n if self.feature_extract_by == 'torchaudio':\n melspectrogram = self.transforms(torch.FloatTensor(signal))\n melspectrogram = self.amplitude_to_db(melspectrogram)\n melspectrogram = melspectrogram.numpy()\n\n elif self.feature_extract_by == 'librosa':\n melspectrogram = librosa.feature.melspectrogram(\n y=signal,\n sr=self.sample_rate,\n n_mels=self.n_mels,\n n_fft=self.n_fft,\n hop_length=self.hop_length\n )\n melspectrogram = librosa.amplitude_to_db(melspectrogram, ref=np.max)\n\n else:\n raise ValueError(\"Unsupported library : {0}\".format(self.feature_extract_by))\n\n return melspectrogram\n\n\nclass MFCC(object):\n \"\"\"\n Create the Mel-frequency cepstrum coefficients (MFCCs) from an audio signal.\n\n Args: sample_rate, n_mfcc, window_size, stride, feature_extract_by\n sample_rate (int): Sample rate of audio signal. (Default: 16000)\n n_mfcc (int): Number of mfc coefficients to retain. (Default: 40)\n window_size (int): window size (ms) (Default: 20)\n stride (int): Length of hop between STFT windows. 
(ms) (Default: 10)\n feature_extract_by (str): which library to use for feature extraction(default: librosa)\n \"\"\"\n def __init__(self, sample_rate=16000, n_mfcc=40, window_size=20, stride=10, feature_extract_by='librosa'):\n self.sample_rate = sample_rate\n self.n_mfcc = n_mfcc\n self.n_fft = int(sample_rate * 0.001 * window_size)\n self.hop_length = int(sample_rate * 0.001 * stride)\n self.feature_extract_by = feature_extract_by.lower()\n\n if self.feature_extract_by == 'torchaudio':\n self.transforms = torchaudio.transforms.MFCC(\n sample_rate=sample_rate,\n n_mfcc=n_mfcc,\n log_mels=True,\n win_length=window_size,\n hop_length=self.hop_length,\n n_fft=self.n_fft\n )\n\n def __call__(self, signal):\n if self.feature_extract_by == 'torchaudio':\n mfcc = self.transforms(torch.FloatTensor(signal))\n mfcc = mfcc.numpy()\n\n elif self.feature_extract_by == 'librosa':\n mfcc = librosa.feature.mfcc(\n y=signal,\n sr=self.sample_rate,\n n_mfcc=self.n_mfcc,\n n_fft=self.n_fft,\n hop_length=self.hop_length\n )\n\n else:\n raise ValueError(\"Unsupported library : {0}\".format(self.feature_extract_by))\n\n return mfcc\n","sub_path":"kospeech/data/audio/feature.py","file_name":"feature.py","file_ext":"py","file_size_in_byte":5418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"337188871","text":"def menores_a_10(matriz):\n menores=[]\n for renglon in matriz:\n for elem in renglon:\n if elem < 10:\n menores.append(elem)\n return menores\n \ndef lee_matriz(ren,col):\n matriz=[]\n for i in range(ren):\n renglon=[]\n for j in range(col):\n valor = int(input())\n renglon.append(valor)\n matriz.append(renglon)\n return matriz\n\ndef main():\n renglones=int(input())\n columnas=int(input())\n matriz=lee_matriz(renglones,columnas)\n print(menores_a_10(matriz))\n\n\n\nif __name__=='__main__':\n main()\n","sub_path":"assignments/17MenoresANumero/src/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"470079389","text":"import numpy as np\nimport random\nimport constants\n\nclass Map:\n def __init__(self, dim):\n self.grid = self.create_grid(dim)\n self.target_loc = self.get_target_loc(dim)\n self.dim = dim\n \n def create_grid(self, dim):\n grid = np.zeros((dim, dim))\n for i in range(dim):\n for j in range(dim):\n p = random.random()\n if p < constants.flat_cell_prob:\n grid[i][j] = constants.flat\n elif p < constants.hill_cell_prob:\n grid[i][j] = constants.hilly\n elif p < constants.forest_cell_prob:\n grid[i][j] = constants.forested\n elif p < constants.maze_of_caves_prob:\n grid[i][j] = constants.maze_of_caves\n return grid\n\n def get_target_loc(self, dim):\n i = np.random.randint(0, high=dim)\n j = np.random.randint(0, high=dim)\n return (i, j)\n\n def print_map(self):\n printable = \"\"\n for i in range(self.dim):\n for j in range(self.dim):\n printable += str(self.grid[i][j]) + \" \"\n\n printable += \"\\n\"\n print(printable)\n\n def query(self, cell):\n answer = None\n #print(cell)\n terrain = self.get_terrain(cell)\n if cell == self.target_loc:\n i, j = cell\n if i < 0 or j < 0 or i >= self.dim or j >= self.dim:\n print(\"Error\")\n p = random.random()\n if terrain == constants.flat:\n if p < .1:\n answer = False\n else:\n answer = True\n elif terrain == constants.hilly:\n if p < .3:\n answer = False\n else:\n answer = True\n elif terrain == constants.forested:\n if p < .7:\n answer = False\n else:\n answer = 
True\n            elif terrain == constants.maze_of_caves:\n                if p < .9:\n                    answer = False\n                else:\n                    answer = True\n        else:\n            answer = False\n        return answer\n\n    def get_terrain(self, cell):\n        i, j = cell[0],cell[1]\n        #print('hi')\n        #print(i)\n        #print(j)\n        return self.grid[i][j]\n\n\n#test = Map(10)\n#test.print_map()\n\n \n\n","sub_path":"Search_And_Destroy_p3/_map.py","file_name":"_map.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"249940151","text":"\"\"\"\nGiven an array S of n integers, are there elements a, b, c, and d in S such that a + b + c + d = target?\n\nFind all unique quadruplets in the array which gives the sum of target.\n\nExample\nExample 1:\n\nInput:[2,7,11,15],3\nOutput:[]\n\nExample 2:\n\nInput:[1,0,-1,0,-2,2],0\nOutput:\n[[-1, 0, 0, 1]\n,[-2, -1, 1, 2]\n,[-2, 0, 0, 2]]\n\"\"\"\n\nclass Solution:\n    \"\"\"\n    @param numbers: Give an array\n    @param target: An integer\n    @return: Find all unique quadruplets in the array which gives the sum of zero\n    \"\"\"\n    def fourSum(self, numbers, target):\n        # write your code here\n        numbers.sort()\n        ans = []\n\n        for i in range(len(numbers) - 3):\n            if i and numbers[i] == numbers[i-1]:\n                continue\n            for j in range( i+1, len(numbers) - 2 ):\n                if j != i+1 and numbers[j] == numbers[j-1]:\n                    continue\n\n                sumTmp = target - numbers[i] - numbers[j]\n                left, right = j+1, len(numbers) - 1\n                while left < right:\n                    if numbers[left] + numbers[right] < sumTmp:\n                        left += 1\n                    elif numbers[left] + numbers[right] > sumTmp:\n                        right -= 1\n                    elif numbers[left] + numbers[right] == sumTmp:\n                        ans.append([numbers[i], numbers[j], numbers[left], numbers[right]])\n                        right -= 1\n                        left += 1\n\n                        while left < right and numbers[left] == numbers[left - 1]:\n                            left += 1\n\n                        while left < right and numbers[right] == numbers[right + 1]:\n                            right -= 1\n\n        return ans\n","sub_path":"Python Version/Pointer/4sum.py","file_name":"4sum.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"241498905","text":"import os\nimport sys, getopt\nimport numpy as np\nfrom scipy import stats\nplot_result = True\ntry:\n\timport matplotlib\n\tmatplotlib.use('Agg')\n\timport matplotlib.pyplot as plt\nexcept:\n\tprint (\"Could not find matplotlib; will continue without distribution plot\")\n\tplot_result = False\n\ndef main(argv):\n    inputfile = ''\n    output_name = ''\n    try:\n        opts, args = getopt.getopt(argv,\"hi:o:f:\",[\"input_file=\",\"out_name=\",\"fasta_file=\"])\n    except getopt.GetoptError:\n        print ('usage: assess_TIS_annotation.py -i <input_file> -f <fasta_file> -o <out_name>')\n        sys.exit(2)\n    for opt, arg in opts:\n        if opt == '-h':\n            print ('usage: assess_TIS_annotation.py -i <input_file> -f <fasta_file> -o <out_name>')\n            sys.exit()\n        elif opt in (\"-i\", \"--input_file\"):\n            inputfile = arg\n        elif opt in (\"-o\", \"--out_name\"):\n            output_name = arg\n        elif opt in (\"-f\", \"--fasta_file\"):\n            fasta_file = arg\n    if len(inputfile) == 0 or len(output_name) == 0:\n        print ('usage: assess_TIS_annotation.py -i <input_file> -f <fasta_file> -o <out_name>')\n        sys.exit()\n    return inputfile,fasta_file,output_name\n\n\n \n\n## Analysis parameters ##\nout_dir = \"output/\"\ncodon_search_window = 198\nstart_codons = [\"ATG\", \"GTG\", \"TTG\"]\nstop_codons = [\"TAA\", \"TAG\", \"TGA\"]\nrelative_scores = True\nmin_number_of_orfs = 500\n\n\ndef init_genome(input_file,fasta_file,outname):\n\tgenome_seq,genome_gc = get_genome_seq(fasta_file)\n\tgenome_orfs,name = read_ptt(input_file)\n\tif len(genome_orfs) < min_number_of_orfs:\n\t\tprint 
(\"Number of ORFs below threshold, exiting\")\n\t\tsys.exit()\n\tcandidate_starts_per_orf, initial_pca_keys = get_codon_search_seqs(genome_orfs,genome_seq,name,genome_gc)\n\treturn None\n \ndef get_genome_seq(fasta_file):\n\tgenome_seq = \"\"\n\tfna = open(fasta_file,\"r\")\n\tfor line in fna:\n\t\tline = line.strip()\n\t\tif line[0] != \">\":\n\t\t\tgenome_seq += line\n\tfna.close()\n\tgenome_seq = genome_seq.upper()\n\tgenome_length = float(len(genome_seq))\n\tgc = (genome_seq.count(\"G\")+genome_seq.count(\"C\"))/genome_length\n\tprint (\"Loading genome sequence done..\")\n\tprint (\"GC%:\\t\",round(gc,3))\n\treturn genome_seq,gc\n\t\ndef reverse_sequence(sequence):\n\tsequence = sequence[::-1]\n\tsequence = sequence.replace(\"A\",\"X\")\n\tsequence = sequence.replace(\"T\",\"A\")\n\tsequence = sequence.replace(\"X\",\"T\")\n\tsequence = sequence.replace(\"C\",\"X\")\n\tsequence = sequence.replace(\"G\",\"C\")\n\tsequence = sequence.replace(\"X\",\"G\")\n\treturn sequence\n\ndef read_ptt(genome):\n\tgenome_orfs = {}\n\tptt_file = open(genome,\"r\")\n\tlines = ptt_file.readlines()\n\tptt_file.close()\n\tname = lines[0].split(\",\")[0]\n\tfor line in lines[3:]:\n\t\tline = line.strip().split(\"\\t\")\n\t\tloc = line[0].split(\"..\")\n\t\tstrand = line[1]\n\t\tlocus_tag = line[5]\n\t\tgenome_orfs[locus_tag] = [int(loc[0]),int(loc[1]),strand]\n\tprint (\"Loading ORF annotation done...\")\n\treturn genome_orfs,name\n\t\ndef get_codon_search_seqs(genome_orfs,genome_seq,name,genome_gc):\n\tcandidate_starts_per_orf = {}\n\tinitial_pca_keys = []\n\tcandidate_start_codons = []\n\tall_candidate_start_codons = []\n\tstart_freqs_up = {}\n\tstart_freqs_down = {}\n\tcount = 0\n\t\n\t#coding start probality\n\tcoding_starts = 0\n\tcoding_triplets = 0\n\tupstream_longest_orf_starts = 0\n\tupstream_longest_orf_triplets = 0\n\n\tfor i in range(codon_search_window*-1,0,3):\n\t\tstart_freqs_up[i] = 0\n\tfor i in range(0,codon_search_window+1,3):\n\t\tstart_freqs_down[i] = 0\n\t\t\t\n\tfor gene in sorted(genome_orfs.keys()):\n\t\tdist_from_longest_orf = 0\n\t\torf_candidate_start_codons = []\n\t\tstart,stop,strand = genome_orfs[gene]\n\t\tstart -= 1\n\t\tend_seq_window = codon_search_window\n\t\tif (stop-start)0:\n\t\t\tlongest_orf = candidate_starts_upstream[-1][1]\n\t\telse:\n\t\t\tlongest_orf = start\n\t\t# Don't include the first 30 nt\n\t\tif strand == \"+\":\n\t\t\tlongest_orf_up_seq = genome_seq[longest_orf-codon_search_window:longest_orf]\n\t\tif strand == \"-\":\n\t\t\tlongest_orf_up_seq = genome_seq[longest_orf:longest_orf+codon_search_window]\n\t\t\tlongest_orf_up_seq = reverse_sequence(longest_orf_up_seq)\n\n\t\torfs = find_candidate_starts_no_stop_check(longest_orf_up_seq,\"backward\",strand,start)\n\t\tupstream_longest_orf_starts += len(orfs)\n\t\tupstream_longest_orf_triplets += len(longest_orf_up_seq)/3\n\t\t\n\t\t\t\t\n\t\tcandidate_starts_downstream = find_candidate_starts(down_seq,\"forward\",strand,start)\n\t\tfor candidate_start_downstream in candidate_starts_downstream:\n\t\t\trelative_position = int(candidate_start_downstream[0])\n\t\t\tstart_freqs_down[relative_position] += 1\n\t\n\tcombined_dict = start_freqs_up\n\tfor key in start_freqs_down.keys():\n\t\tcombined_dict[key] = start_freqs_down[key]\n\n\tcoding_alt_start_freq = coding_starts/float(coding_triplets)\n\tupstream_alt_start_freq = upstream_longest_orf_starts/float(upstream_longest_orf_triplets)\n\tplot_data(combined_dict,name,len(genome_orfs.keys()),coding_alt_start_freq,upstream_alt_start_freq,genome_gc)\n\n\treturn 
candidate_starts_per_orf, initial_pca_keys\n\t\ndef find_candidate_starts(sequence,direction,strand,start):\n\trelative_position = None\n\talternative_starts = []\n\tcodon = None\n\tif direction == \"backward\":\n\t\tfor i in range(len(sequence)-3,0,-3):\n\t\t\tif sequence[i:i+3] in start_codons:\n\t\t\t\tcodon = sequence[i:i+3]\n\t\t\t\trelative_position = i-len(sequence)\n\t\t\t\tabsolute_position = relative_position+start\n\t\t\t\tif strand == \"-\":\n\t\t\t\t\tabsolute_position = start-relative_position\n\t\t\t\talternative_starts.append([relative_position,absolute_position,codon])\n\t\t\tif sequence[i:i+3] in stop_codons:\n\t\t\t\tbreak\n\tif direction == \"forward\":\n\t\tfor i in range(0,len(sequence),3):\n\t\t\tif sequence[i:i+3] in start_codons:\n\t\t\t\tcodon = sequence[i:i+3]\n\t\t\t\trelative_position = i\n\t\t\t\tabsolute_position = relative_position+start+3\n\t\t\t\trelative_position += 3\n\t\t\t\tif strand == \"-\":\n\t\t\t\t\tabsolute_position = start-relative_position\n\t\t\t\talternative_starts.append([relative_position,absolute_position,codon])\n\t\t\tif sequence[i:i+3] in stop_codons:\n\t\t\t\tbreak\n\treturn alternative_starts\n\ndef find_candidate_starts_no_stop_check(sequence,direction,strand,start):\n\trelative_position = None\n\talternative_starts = []\n\tcodon = None\n\tif direction == \"backward\":\n\t\tfor i in range(len(sequence)-3,0,-3):\n\t\t\tif sequence[i:i+3] in start_codons:\n\t\t\t\tcodon = sequence[i:i+3]\n\t\t\t\trelative_position = i-len(sequence)\n\t\t\t\tabsolute_position = relative_position+start\n\t\t\t\tif strand == \"-\":\n\t\t\t\t\tabsolute_position = start-relative_position\n\t\t\t\talternative_starts.append([relative_position,absolute_position,codon])\n\tif direction == \"forward\":\n\t\tfor i in range(0,len(sequence),3):\n\t\t\tif sequence[i:i+3] in start_codons:\n\t\t\t\tcodon = sequence[i:i+3]\n\t\t\t\trelative_position = i\n\t\t\t\tabsolute_position = relative_position+start+3\n\t\t\t\tif strand == \"+\":\n\t\t\t\t\trelative_position += 3\n\t\t\t\tif strand == \"-\":\n\t\t\t\t\tabsolute_position = start-relative_position-3\n\t\t\t\talternative_starts.append([relative_position,absolute_position,codon])\n\treturn alternative_starts\n\n\ndef plot_data(combined_dict,name,number_of_orfs,coding_alt_start_freq,upstream_alt_start_freq,genome_gc):\n\tN = len(combined_dict.keys())\n\tvalues = []\n\tfunction_values = []\n\tkeys = sorted(combined_dict.keys())\n\tat = (1-genome_gc)/2\n\tgc = genome_gc/2\n\tstop_probability = at*at*at + at*gc*at + at*gc*at\n\tfor label in keys:\n\t\tvalues.append(combined_dict[label])\n\t\tif label<0:\n\t\t\t#Upstream\n\t\t\tfunction_values.append(number_of_orfs*(upstream_alt_start_freq)*(1-stop_probability)**(abs(label)/3))\n\t\telse:\n\t\t\t#Coding\n\t\t\tfunction_values.append(number_of_orfs*(coding_alt_start_freq))\n\tif plot_result == True:\n\t\tind = np.arange(N) # the x locations for the groups\n\t\twidth = 1 # the width of the bars\n\t\tfig = plt.figure()\n\t\tfig.set_size_inches(18.5,10.5)\n\t\tax = fig.add_subplot(111)\n\t\tax.bar(ind, values, width, color='CornflowerBlue',edgecolor = \"black\") #RoyalBlue?\n\t\tax.set_xticks(ind)\n\t\tax.set_xticklabels(keys,rotation='vertical')\n\t\tfig.set_size_inches(18.5,10.5)\n\t\tname = name.replace(\"\\\\\",\"\")\n\t\tname = name.replace(\"/\",\"\")\n\t\tax.plot(function_values,color=\"r\",linewidth=2)\n\t\tplt.ylim([0,500])\n\t\tax.set_title(name)\n\ttry:\n\t\tcorrelation = stats.spearmanr(values,function_values)\n\t\tcorrelation_up = 
stats.spearmanr(values[0:65],function_values[0:65])\n\t\tprint (\"Quality correlation:\\t\",round(correlation_up[0],3))\n\t\tif not os.path.exists(out_dir):\n\t\t\tos.makedirs(out_dir)\n\t\toutput_file = open(out_dir+output_name+\"_correlation.txt\",\"w\")\n\t\toutput_file.write(\"Name\\tGC-percentage\\t#ORFs\\tCorrelation Complete\\tCorrelation Upstream\\n\")\n\t\toutput_file.write(name+\"\\t\"+str(round(genome_gc,2))+\"\\t\"+str(number_of_orfs)+\"\\t\"+str(round(correlation[0],2))+\"\\t\"+str(round(correlation_up[0],2))+\"\\n\")\n\t\toutput_file.close()\n\t\tprint (\"TIS correlation file generated..\")\n\texcept:\n\t\tprint (\"Could not write output file..\")\n\t\tpass\n\tif plot_result == True:\n\t\ttry:\n\t\t\tfor label in ax.get_xticklabels():\n\t\t\t\tlabel.set_fontsize(6)\n\t\t\tif not os.path.exists(out_dir):\n\t\t\t\tos.makedirs(out_dir)\n\t\t\tfig.savefig(out_dir+name+'_distribution.png')\n\t\t\tplt.clf()\n\t\t\tprint (\"TIS distribution plot generated...\")\n\t\texcept:\n\t\t\tprint (\"plot for\", name,\"save failed...\")\n\t\t\tplt.clf()\n\treturn None\n\nif __name__ == \"__main__\":\n input_file,fasta_file,output_name = main(sys.argv[1:])\t\n init_genome(input_file,fasta_file,output_name)\n","sub_path":"assess_TIS_annotation_converted.py","file_name":"assess_TIS_annotation_converted.py","file_ext":"py","file_size_in_byte":10674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"631040253","text":"# first_row = '.^^.^.^^^^'\nfirst_row = open('input.txt').read()\n\ndef next_row(row):\n new = ''\n for i in range(len(row)):\n l = row[i-1] if i > 0 else '.'\n c = row[i]\n r = row[i + 1] if i < len(row)-1 else '.'\n prev = l + c + r\n if prev in ('^^.', '.^^', '^..', '..^'):\n new += '^'\n else:\n new += '.'\n return new\n\nrows = [first_row]\nfor i in range(1, 400000):\n rows.append(next_row(rows[i-1]))\n\nprint(sum(sum(1 for c in row if c == '.') for row in rows))","sub_path":"2016/day18/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"436827365","text":"#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n#@Author: Yang Xiaojun\n\nimport os\n\nimport time\nimport re\nimport copy\n\nzh_pattern = re.compile(u'[\\u4e00-\\u9fa5]+')\npick_zh= re.compile(u'[^\\u4e00-\\u9fa5]+')#排除非中文\n\ndef contain_zh(lis):\n '''\n 判断传入字符串是否包含中文\n :param word: 待判断字符串\n :return: True:包含中文 False:不包含中文\n '''\n i=2\n for word in lis:\n # word = word.decode()\n if i==2:\n # print('y:',type(lis))\n i=3\n global zh_pattern\n match = zh_pattern.search(word)\n if match:\n return match\n return False\n\ndef from_book():\n add=r'C:\\Users\\XUEJW\\Desktop\\兴业数据\\一万句zh_en\\中英文语料库\\词典'\n write_zh=r'C:\\Users\\XUEJW\\Desktop\\兴业数据\\一万句zh_en\\中英文语料库\\词典\\_zh.txt'\n f1 = open(write_zh, 'w')\n\n write_en=r'C:\\Users\\XUEJW\\Desktop\\兴业数据\\一万句zh_en\\中英文语料库\\词典\\_en.txt'\n f2 = open(write_en, 'w')\n mark=0\n wm=0\n i=1\n for (root,dirs,files) in os.walk(add):\n for item in files:\n t1=0\n if_m=0\n slist=''\n if_dic={}\n then_dic={}\n with open(add+'\\\\'+item,'r+',encoding='utf-8') as f:\n if_dic = {}\n then_dic = {}\n fx=f.readlines()\n t1 = 0\n if_m = 0\n #print('0::',len(fx))\n for s in fx:# s is str\n if contain_zh(s):\n try:\n f1.write(s)\n except Exception as e:\n print('err:', e)\n\n else:\n # if i< 3:\n # print('y:',s,len(s))\n # i += 1\n if len(s)>1:#换行符也占一位\n try:\n f2.write(s)\n except Exception as e:\n print('err:',e)\n\n 
f1.close()\n f2.close()\ndef pick_one(lis):#多个翻译取最短那个\n r1 = '\\(.*\\) '#转义要\n r2 = ' \\(.*\\)'\n lth=float(\"inf\")\n for cel in lis:\n cel = re.sub(r1, '', cel)\n cel = re.sub(r2, '', cel)\n cel=cel.strip()\n comp=cel.split(' ')\n if len(comp)1:\n if contain_zh(lis[0]):\n pu=pick_one(lis[1:])\n zh=lis[0]+','+pu+'\\n'\n # print('2:',pu,zh)\n f1.write(zh)\ndef from_dict_en():\n add = r'C:\\Users\\XUEJW\\Desktop\\兴业数据\\一万句zh_en\\中英文语料库\\未处理词典'\n write_zh = r'C:\\Users\\XUEJW\\Desktop\\兴业数据\\一万句zh_en\\中英文语料库\\zh_to_en.txt'\n f1 = open(write_zh, 'w')\n #\n # write_en = r'C:\\Users\\XUEJW\\Desktop\\兴业数据\\一万句zh_en\\中英文语料库\\词典\\_en.txt'\n # f2 = open(write_en, 'w')\n\n for (root,dirs,files) in os.walk(add):\n for item in files:\n row1=[]\n\n row2=[]\n with open(add+'\\\\'+item,'r+',encoding='utf-8') as f:\n fx = f.readlines()\n for x in fx:\n zh=''\n lis=list(x.strip().split(','))\n print('1:',lis)\n if len(lis)>1:\n\n for zcel in lis[1:]:\n zh_in=pick_zh_out(zcel)\n if len(zh_in)>0:\n\n zh = zh_in+ ',' +lis[0] + '\\n'\n print('2:',zh)\n f1.write(zh)\n\n","sub_path":"untiltled code/ecif_xy/get_txt.py","file_name":"get_txt.py","file_ext":"py","file_size_in_byte":4781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"514564800","text":"# coding:utf-8\n# training on voc\n\nfrom tensorflow.compat.v1 import ConfigProto\nfrom tensorflow.compat.v1 import InteractiveSession\nconfig = ConfigProto(allow_soft_placement=True)\nconfig.gpu_options.allow_growth = True\nsession = InteractiveSession(config=config)\n\nimport tensorflow as tf\nfrom src.Data_voc import Data\nfrom src.YOLO import YOLO\nfrom os import makedirs\nfrom os import path\nfrom os import environ\nimport config_yolov4 as config\nimport time\nimport numpy as np\nfrom src import Log\nfrom src import Optimizer\nfrom src import Learning_rate as Lr\nfrom src.Loss_v4tiny import Loss\nfrom src.parallel_v import average_gradients,assign_to_device\n\n#environ['CUDA_VISIBLE_DEVICES'] = '0,1'\nwidth = config.width\nheight = config.height\nprint('w:',width,' h: ',height)\nsize = config.size\nbatch_size = config.batch_size\nclass_num = config.voc_class_num\nif config.voc_anchors:\n anchors = np.asarray(config.voc_anchors).astype(np.float32).reshape([-1, 3, 2])\nelse:\n anchors = None\niou_thresh = config.iou_thresh\nprob_thresh = config.prob_thresh\nscore_thresh = config.score_thresh\n\nweight_decay = config.weight_decay\ncls_normalizer = config.cls_normalizer\niou_normalizer = config.iou_normalizer\n\nlr_type = config.lr_type\nlr_init = config.lr_init\nlr_lower = config.lr_lower\npiecewise_boundaries = config.piecewise_boundaries\npiecewise_values = config.piecewise_values\noptimizer_type = config.optimizer_type\nmomentum = config.momentum\n\n#pretrain_model_path = config.pretrain_model_path\nnames_file = config.voc_names\ndata_debug = config.data_debug\nmodel_name = config.voc_model_name\nmodel_path = config.voc_model_path\ntotal_epoch = config.total_epoch\nsave_per_epoch = config.save_per_epoch\nvoc_root_dir = config.voc_root_dir\nprint('voc_root_dir: ',voc_root_dir)\nMOVING_AVERAGE_DECAY = 0.99\n\n\n# get current epoch \ndef compute_curr_epoch(global_step, batch_size, imgs_num):\n '''\n global_step:current step\n batch_size:batch_size\n imgs_num: total images number\n '''\n epoch = global_step * batch_size / imgs_num\n return tf.cast(epoch, tf.int32)\n\n\n# for parallel--------------------------------------------------------------------\nnum_gpus = config.num_gpus\n\n\n# def 
solve_cudnn_error():\n# #gpus = ConfigProto.experimental.list_physical_devices('GPU')\n# gpus = tf.config.list_physical_devices('GPU')\n# if gpus:\n# try:\n# # Currently, memory growth needs to be the same across GPUs\n# for gpu in gpus:\n# tf.config.experimental.set_memory_growth(gpu, True)\n# logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n# print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n# except RuntimeError as e:\n# # Memory growth must be set before GPUs have been initialized\n# print(e)\n\n\n#------------------------------------------------------------------------------------\n\n# training\ndef backward():\n yolo = YOLO()\n tf.reset_default_graph() \n # dataset\n print('train before data')\n fh = open('{}_log.txt'.format(model_path),'w')\n \n if not path.exists(model_path):\n makedirs(model_path)\n print('create model save folder: ',model_path)\n \n data = Data(voc_root_dir, names_file, class_num, batch_size*num_gpus, \n anchors, is_tiny=False, size=size)\n imgs_ls = data.imgs_path\n labels_ls = data.labels_path\n print('train imgs_ls: ',imgs_ls[:5])\n print('train labels_ls: ',labels_ls[:5])\n # create dataset\n dataset = tf.data.Dataset.from_tensor_slices((imgs_ls, labels_ls))\n dataset = dataset.shuffle(len(imgs_ls)) # shuffle\n dataset = dataset.batch(batch_size=batch_size *num_gpus) # for parallel\n dataset = dataset.map(\n lambda imgs_batch, xmls_batch: tf.py_func(\n data.load_tf_batch_data, \n inp=[(imgs_batch, xmls_batch)],\n Tout=[tf.float32, tf.float32, tf.float32, tf.float32]),\n num_parallel_calls=4*num_gpus\n )\n dataset = dataset.prefetch(20)\n # repeat\n dataset = dataset.repeat()\n # iterator\n iterator = dataset.make_initializable_iterator()\n inputs, y1_true, y2_true ,y3_true= iterator.get_next() #y1 = 13、19 y2=26、38\n #print('iterator.get_next():',iterator.get_next())\n \n # set shape\n with tf.device('/cpu:0'):\n tower_grads = []\n reuse_vars = None\n #inputs.set_shape([batch_size *num_gpus, None, None, None, 3])\n inputs.set_shape([None, None, None, 3])\n y1_true.set_shape([batch_size *num_gpus, None, None, 3, 5+class_num]) # 5: xywh,score\n y2_true.set_shape([batch_size *num_gpus, None, None, 3, 5+class_num])\n y3_true.set_shape([batch_size *num_gpus, None, None, 3, 5+class_num])\n \n \n \n global_step = tf.Variable(0, trainable=False)\n epoch = compute_curr_epoch(global_step, batch_size, len(data.imgs_path))\n lr = Lr.config_lr(lr_type, lr_init, lr_lower=lr_lower, \\\n piecewise_boundaries=piecewise_boundaries, \\\n piecewise_values=piecewise_values, epoch=epoch)\n #optimizer = Optimizer.config_optimizer(optimizer_type, lr, momentum)\n optimizer = Optimizer.config_optimizer('momentum', lr, momentum)\n \n \n\n #with tf.variable_scope('gpus') :\n with tf.variable_scope(tf.get_variable_scope()) as vscope:\n for i in range(num_gpus):\n print('/gpu: ',i)\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('GPU_%d' % i) as scope: \n\n print('tf.get_variable_scope().reuse: ',tf.get_variable_scope().reuse)\n print('tf.get_variable_scope().original_name_scope: ',tf.get_variable_scope().original_name_scope)\n # Split data between GPUs\n\n _inputs = inputs[i * batch_size: (i+1) * batch_size]\n _y1_true = y1_true[i * batch_size: (i+1) * batch_size, ...]\n _y2_true = y2_true[i * batch_size: (i+1) * batch_size, ...]\n _y3_true = y3_true[i * batch_size: (i+1) * batch_size, ...]\n\n # Because Dropout have different behavior at training and prediction time, we\n # need to create 2 distinct computation graphs that share the same 
weights.\n\n # Create a graph for training\n\n feature_y1, feature_y2, feature_y3 = yolo.forward(_inputs, class_num, weight_decay=weight_decay, isTrain=True,reuse=reuse_vars)\n\n # Create another graph for testing that reuse the same weights\n\n\n # feature_y1_test, feature_y2_test, feature_y3_test = yolo.forward(_inputs, class_num, weight_decay=weight_decay, isTrain=True,reuse=True)\n\n# global_step = tf.Variable(0, trainable=False)\n\n # loss value of yolov4\n loss = Loss().yolo_loss([feature_y1, feature_y2, feature_y3], \n [_y1_true, _y2_true, _y3_true], \n [anchors[2],anchors[1], anchors[0]], \n width, height, class_num,\n cls_normalizer=cls_normalizer,\n iou_normalizer=iou_normalizer,\n iou_thresh=iou_thresh, \n prob_thresh=prob_thresh, \n score_thresh=score_thresh)\n l2_loss = tf.compat.v1.losses.get_regularization_loss() \n\n reuse_vars = True #\n tf.get_variable_scope().reuse_variables()\n\n# epoch = compute_curr_epoch(global_step, batch_size, len(data.imgs_path))\n\n\n# lr = Lr.config_lr(lr_type, lr_init, lr_lower=lr_lower, \\\n# piecewise_boundaries=piecewise_boundaries, \\\n# piecewise_values=piecewise_values, epoch=epoch)\n# #optimizer = Optimizer.config_optimizer(optimizer_type, lr, momentum)\n# optimizer = Optimizer.config_optimizer('momentum', lr, momentum)\n\n # update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS,scope = scope) # scope = scope add 2/10\n # #update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS) \n # with tf.control_dependencies(update_ops): #can ensure run update_ops fisrt ,then do things within below code block \n gvs = optimizer.compute_gradients(loss+l2_loss)\n clip_grad_var = [gv if gv[0] is None else[tf.clip_by_norm(gv[0], 100.), gv[1]] for gv in gvs]\n #print('clip_grad_var: ',clip_grad_var)\n # train_step = optimizer.apply_gradients(clip_grad_var, global_step=global_step)\n\n\n reuse_vars = True #\n tower_grads.append(clip_grad_var) #\n tf.get_variable_scope().reuse_variables()\n\n #print('tower_grads: ',tower_grads)\n \n tower_grads = average_gradients(tower_grads) #\n tower_clip_grad_var = [tower_gv if tower_gv[0] is None else[tf.clip_by_norm(tower_gv[0], 100.), tower_gv[1]] for tower_gv in tower_grads]\n \n update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS) # scope = scope add 2/10\n with tf.control_dependencies(update_ops):\n train_step = optimizer.apply_gradients(tower_clip_grad_var, global_step=global_step) #\n \n ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,global_step)\n ema_op = ema.apply(tf.trainable_variables())\n \n with tf.control_dependencies([train_step, ema_op]):\n train_op = tf.no_op(name = 'train')\n\n #solve_cudnn_error()\n# g_list = tf.global_variables()\n# bn_moving_mean_vars = [g for g in g_list if 'moving_mean' in g.name]\n# bn_moving_variance_vars = [g for g in g_list if 'moving_variance' in g.name]\n \n # initialize\n config_ = ConfigProto(allow_soft_placement=True)\n config_.gpu_options.allow_growth = True\n init = tf.compat.v1.global_variables_initializer()\n init_local = tf.compat.v1.local_variables_initializer()\n saver = tf.compat.v1.train.Saver()\n \n with tf.compat.v1.Session(config = config_) as sess:\n sess.run([init,init_local])\n sess.run(iterator.initializer)\n step = 0\n \n \n ckpt = tf.compat.v1.train.get_checkpoint_state(model_path)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]\n #step = eval(step)\n step = 0\n 
Log.add_log(\"message: load ckpt model, global_step=\" + str(step))\n else:\n Log.add_log(\"message:can not fint ckpt model\")\n \n curr_epoch = step // data.steps_per_epoch\n print('curr_epoch: ',curr_epoch)\n \n while curr_epoch < total_epoch:\n for _ in range(data.steps_per_epoch):\n start = time.perf_counter()\n _, loss_, step, lr_= sess.run([train_op, loss, global_step, lr ])\n #_ , step, lr_ = sess.run([train_op,global_step, lr])\n end = time.perf_counter()\n# print('bnmean :',bnmean)\n# print('bnvari: ',bnvari)\n \n# if (loss_ > 1e3) and (step > 1e3):\n# Log.add_log(\"error:loss exception, loss_value = \"+str(loss_))\n# ''' break the process or lower learning rate '''\n# raise ValueError(\"error:loss exception, loss_value = \"+str(loss_)+\", please lower your learning rate\")\n# # lr = tf.math.maximum(tf.math.divide(lr, 10), config.lr_lower)\n\n if step % 5 == 2:\n print(\"step: %6d, epoch: %3d, loss: %.5g\\t, wh: %3d, lr:%.5g\\t, time: %5f s\"\n %(step, curr_epoch, loss_, width, lr_, end-start))\n# print(\"step: %6d, epoch: %3d, , wh: %3d, time: %5f s\"\n# %(step, curr_epoch, width, end-start))\n Log.add_loss(str(step) + \"\\t\" + str(loss_))\n fh.write(\"step: %6d, epoch: %3d, loss: %.5g\\t, wh: %3d, lr:%.5g\\t, time: %5f s\\n\"\n %(step, curr_epoch, loss_, width, lr_, end-start))\n\n curr_epoch += 1\n \n if curr_epoch % save_per_epoch == 0:\n # save ckpt model\n Log.add_log(\"message: save ckpt model, step=\" + str(step) +\", lr=\" + str(lr_))\n #Log.add_log(\"message: save ckpt model, step=\" + str(step))\n saver.save(sess, path.join(model_path, model_name), global_step=step) \n \n Log.add_log(\"message: save final ckpt model, step=\" + str(step))\n saver.save(sess, path.join(model_path, model_name), global_step=step)\n fh.close() \n return 0\n\n\nif __name__ == \"__main__\":\n Log.add_log(\"message: into VOC backward function\")\n \n # Log.add_loss(\"###########\")\n backward()\n tf.reset_default_graph()","sub_path":"train_voc_tf_data_parallel_v2_yolov4.py","file_name":"train_voc_tf_data_parallel_v2_yolov4.py","file_ext":"py","file_size_in_byte":13922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"36745733","text":"import pygame\nimport classesEskiv\n\n# Define some colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\n\npygame.init()\n\n# Set the height and width of the screen\nscreen_width = 700\nscreen_height = 500\nscreen = pygame.display.set_mode([screen_width, screen_height])\n\npygame.display.set_caption(\"Eskiv Interstellar\")\n\n# Loop until the user clicks the close button.\ndone = False\n\n# Used to manage how fast the screen updates\nclock = pygame.time.Clock()\n\nbackground_img = pygame.image.load (\"space-4.jpg\").convert()\n\n# This is a list of sprites.\nobjects = pygame.sprite.Group()\n\n# This is a list of every sprite.\nall_sprites_list = pygame.sprite.Group()\n\n#Generates the player in the middle of the plane.\nplayer = classesEskiv.Player(screen_width//2-12, screen_height//2-12)\nall_sprites_list.add(player)\n\ntarget=classesEskiv.Target()\nobjects.add(target)\nall_sprites_list.add(target)\n\n\n\n# -------- Main Program Loop -----------\nwhile not done:\n # --- Main event loop\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n player.velocity[0]=1\n elif event.key == pygame.K_DOWN:\n player.velocity[1]=1\n elif event.key == pygame.K_LEFT:\n player.velocity[2]=1\n 
elif event.key == pygame.K_RIGHT:\n player.velocity[3]=1\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_UP:\n player.velocity[0]=0\n elif event.key == pygame.K_DOWN:\n player.velocity[1]=0\n elif event.key == pygame.K_LEFT:\n player.velocity[2]=0\n elif event.key == pygame.K_RIGHT:\n player.velocity[3]=0\n\n # --- Game logic should go here\n\n # Makes sure no objects move outside the screen.\n for sprite in all_sprites_list:\n sprite.stayInside()\n\n # Moves the player\n player.move()\n\n #Moves all other objects\n for sprite in objects:\n if sprite.blockType==2:\n sprite.move()\n\n # --------------- This section checks if the player has collided with anything and decides what to do: ----------------------- #\n\n sprites_hit_list = pygame.sprite.spritecollide(player, objects, False)\n\n for sprite in sprites_hit_list:\n if sprite.blockType == 1:\n player.score+=1\n print(\"Your current score is: \" + str(player.score))\n\n #moves the target to a different position:\n newTargetCoords=classesEskiv.regenerateNewTarget(sprite, screen_width, screen_height)\n\n #Generates a new bounceball enemy-object\n temp=classesEskiv.generateEnemy(screen_width, screen_height, objects, all_sprites_list)\n #Refreshes the content of the sprite groups.\n objects=temp[0]\n all_sprites_list=temp[1]\n # Checks if the player hit an enemy bounceball.\n elif sprite.blockType == 2:\n\n done = True\n print(\"You got \" + str(player.score) + \" points.\")\n break\n\n # --------------- This section checks if the player has collided with anything and decides what to do: ----------------------- #\n\n # --- Screen-clearing code goes here\n\n # Here, we clear the screen to white. Don't put other drawing commands\n # above this, or they will be erased with this command.\n\n # If you want a background image, replace this clear with blit'ing the\n # background image.\n screen.fill(WHITE)\n screen.blit (background_img, (0, 0))\n # Draw all the spites\n all_sprites_list.draw(screen)\n\n # --- Drawing code should go here\n\n # --- Go ahead and update the screen with what we've drawn.\n pygame.display.flip()\n\n # --- Limit to 60 frames per second\n clock.tick(60)\n\n# Close the window and quit.\npygame.quit()\n","sub_path":"Eskiv2/Eskiv2.py","file_name":"Eskiv2.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"622278473","text":"\"\"\" Exception errors.\"\"\"\n\nfrom flask import jsonify\n\n\nclass NotFound():\n def response(self, entity, id):\n \"\"\"return Not found message.\"\"\"\n data = {\n \"status\": \"Failure\",\n \"message\": \"{} with id {} Not found\".format(entity, id)\n }\n return jsonify(data), 404\n","sub_path":"api/common/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"56408109","text":"import sys\nimport time\nimport numpy as np\nimport scipy.io as io\nimport theano\nimport theano.tensor as T\n\ndef multiconstraint(*fs): \n return lambda H, V: (sum(map(lambda f: f(H, V), fs)) / len(fs))\n\ndef trajectory(traj):\n \n def trajectory_constraint(H, V):\n velocity_scale = 10\n return velocity_scale * T.mean((V[:,-7:-4] - traj)**2)\n \n return trajectory_constraint\n \ndef foot_sliding(labels):\n \n def foot_sliding_constraint(H, V):\n \n feet = np.array([[12,13,14], [15,16,17],[24,25,26], [27,28,29]])\n contact = (labels > 0.5).astype(theano.config.floatX)\n \n offsets = 
T.concatenate([\n V[:,feet[:,0:1]],\n T.basic.zeros((V.shape[0],len(feet),1,V.shape[2])),\n V[:,feet[:,2:3]]], axis=2)\n \n def cross(A, B):\n return T.concatenate([\n A[:,:,1:2]*B[:,:,2:3] - A[:,:,2:3]*B[:,:,1:2],\n A[:,:,2:3]*B[:,:,0:1] - A[:,:,0:1]*B[:,:,2:3],\n A[:,:,0:1]*B[:,:,1:2] - A[:,:,1:2]*B[:,:,0:1]\n ], axis=2)\n \n rotation = -V[:,-5].dimshuffle(0,'x','x',1) * cross(np.array([[[0,1,0]]]), offsets)\n \n velocity_scale = 10\n cost_feet_x = velocity_scale * T.mean(contact[:,:,:-1] * (((V[:,feet[:,0],1:] - V[:,feet[:,0],:-1]) + V[:,-7,:-1].dimshuffle(0,'x',1) + rotation[:,:,0,:-1])**2))\n cost_feet_z = velocity_scale * T.mean(contact[:,:,:-1] * (((V[:,feet[:,2],1:] - V[:,feet[:,2],:-1]) + V[:,-6,:-1].dimshuffle(0,'x',1) + rotation[:,:,2,:-1])**2))\n #cost_feet_y = T.mean(contact * ((V[:,feet[:,1]] - np.array([[0.75], [0.0], [0.75], [0.0]]))**2))\n cost_feet_y = velocity_scale * T.mean(contact[:,:,:-1] * ((V[:,feet[:,1],1:] - V[:,feet[:,1],:-1]) **2))\n cost_feet_h = 10.0 * T.mean(T.minimum(V[:,feet[:,1],1:], 0.0)**2)\n \n return (cost_feet_x + cost_feet_z + cost_feet_y + cost_feet_h) / 4\n \n return foot_sliding_constraint\n\ndef joint_lengths(\n parents=np.array([-1,0,1,2,3,4,1,6,7,8,1,10,11,12,12,14,15,16,12,18,19,20]),\n lengths=np.array([\n 2.40,7.15,7.49,2.36,2.37,7.43,7.50,2.41,\n 2.04,2.05,1.75,1.76,2.90,4.98,3.48,0.71,\n 2.73,5.24,3.44,0.62], dtype=theano.config.floatX)):\n\n def joint_lengths_constraint(H, V):\n \n J = V[:,:-7].reshape((V.shape[0], len(parents), 3, V.shape[2])) \n return T.mean((\n T.sqrt(T.sum((J[:,2:] - J[:,parents[2:]])**2, axis=2)) - \n lengths[np.newaxis,...,np.newaxis])**2)\n\n return joint_lengths_constraint\n \n \ndef constrain(X, forward, backward, preprocess, constraint, alpha=0.1, iterations=100):\n \n H = theano.shared(np.array(forward(theano.shared((X - preprocess['Xmean']) / preprocess['Xstd'])).eval()))\n V = (backward(H) * preprocess['Xstd']) + preprocess['Xmean']\n \n cost = constraint(H, V)\n \n self_alpha = alpha\n self_beta1 = 0.9\n self_beta2 = 0.999\n self_eps = 1e-05\n self_batchsize = 1\n\n self_params = [H]\n self_m0params = [theano.shared(np.zeros(p.shape.eval(), dtype=theano.config.floatX)) for p in self_params]\n self_m1params = [theano.shared(np.zeros(p.shape.eval(), dtype=theano.config.floatX)) for p in self_params]\n self_t = theano.shared(np.array([1], dtype=theano.config.floatX))\n\n gparams = T.grad(cost, self_params)\n m0params = [self_beta1 * m0p + (1-self_beta1) * gp for m0p, gp in zip(self_m0params, gparams)]\n m1params = [self_beta2 * m1p + (1-self_beta2) * (gp*gp) for m1p, gp in zip(self_m1params, gparams)]\n params = [p - (self_alpha / self_batchsize) * \n ((m0p/(1-(self_beta1**self_t[0]))) /\n (T.sqrt(m1p/(1-(self_beta2**self_t[0]))) + self_eps))\n for p, m0p, m1p in zip(self_params, m0params, m1params)]\n\n updates = ([( p, pn) for p, pn in zip(self_params, params)] +\n [(m0, m0n) for m0, m0n in zip(self_m0params, m0params)] +\n [(m1, m1n) for m1, m1n in zip(self_m1params, m1params)] +\n [(self_t, self_t+1)])\n\n constraint_func = theano.function([], cost, updates=updates)\n\n start = time.clock()\n for i in range(iterations):\n cost = constraint_func()\n print('Constraint Iteration %i, error %f' % (i, cost))\n print('Constraint: %0.4f' % (time.clock() - start))\n \n return (np.array(backward(H).eval()) * preprocess['Xstd']) + preprocess['Xmean']\n \n 
","sub_path":"synth/constraints.py","file_name":"constraints.py","file_ext":"py","file_size_in_byte":4422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"108931977","text":"import graphlab as gl\nimport numpy as np\n\nhouse_data = gl.SFrame('kc_house_data.gl')\n\n\ndef to_numpy_data(sframe_data, features, target):\n sframe_data['constant'] = 1\n feature_matrix = sframe_data.select_columns(['constant'] + features)\n target_vector = sframe_data.select_column(target)\n return feature_matrix.to_numpy(), target_vector.to_numpy()\n\n\ndef predict_outcome(feature_matrix, weights):\n return np.dot(feature_matrix, weights)\n\n\ndef feature_derivative(errors, feature):\n return 2 * np.dot(errors, feature)\n\n\ndef regression_gradient_descent(feature_matrix, output, initial_weights, step_size, tolerance):\n converged = False\n weights = np.array(initial_weights)\n while not converged:\n # compute the predictions based on feature_matrix and weights:\n # compute the errors as predictions - output:\n predictions = predict_outcome(feature_matrix, weights)\n errors = predictions - output\n\n gradient_sum_squares = 0 # initialize the gradient\n # while not converged, update each weight individually:\n for i in range(len(weights)):\n # Recall that feature_matrix[:, i] is the feature column associated with weights[i]\n # compute the derivative for weight[i]:\n derivative = feature_derivative(errors, feature_matrix[:, i])\n\n # add the squared derivative to the gradient magnitude\n gradient_sum_squares += derivative ** 2\n\n # update the weight based on step size and derivative:\n weights[i] = weights[i] - step_size * derivative\n\n gradient_magnitude = np.sqrt(gradient_sum_squares)\n if gradient_magnitude < tolerance:\n converged = True\n return (weights)\n\n\ntrain_data, test_data = house_data.random_split(.8, seed=0)\n\nsimple_features = ['sqft_living']\nmy_output = 'price'\n(simple_feature_matrix, output) = to_numpy_data(train_data, simple_features, my_output)\ninitial_weights = np.array([-47000., 1.])\nstep_size = 7e-12\ntolerance = 2.5e7\n\nsimple_weights = regression_gradient_descent(\n simple_feature_matrix, output, initial_weights, step_size, tolerance)\nprint('===== QUIZ 1 =====')\nprint('Coefficients of simple model: {0}'.format(simple_weights))\n\nmodel_2_features = ['sqft_living', 'sqft_living15']\n(model_2_feature_matrix, output) = to_numpy_data(train_data, model_2_features, my_output)\nmodel_2_initial_weights = np.array([-100000., 1., 1.])\nmodel_2_step_size = 4e-12\nmodel_2_tolerance = 1e9\n\nmodel_2_weights = regression_gradient_descent(model_2_feature_matrix, output,\n model_2_initial_weights, model_2_step_size,\n model_2_tolerance)\nprint('Coefficients of model 2: {0}'.format(model_2_weights))\n\n\ndef do_prediction(dataset, features, target, model):\n feature_matrix, target_array = to_numpy_data(dataset, features, target)\n return predict_outcome(feature_matrix, model), target_array\n\n\nfirst_house = test_data[0:1]\nsimple_prediction, _ = do_prediction(first_house,\n features=['sqft_living'], target='price', model=simple_weights)\nmodel_2_prediction, _ = do_prediction(first_house,\n ['sqft_living', 'sqft_living15'], 'price', model_2_weights)\nprint('===== QUIZ 2 =====')\nprint('Price of first test house based on simple model: {0}'.format(simple_prediction))\nprint('Price of first test house based on model 2: {0}'.format(model_2_prediction))\nprint('Price of first house is: {0}'.format(first_house['price']))\n\n\ndef rss(dataset, 
features, target, model):\n predictions, target_array = do_prediction(dataset, features, target, model)\n return np.sum((target_array - predictions) ** 2)\n\n\nprint('===== QUIZ 3 =====')\nrss_of_simple_model = rss(test_data, ['sqft_living'], 'price', simple_weights)\nprint('RSS of simple model on test data is: {0}'.format(rss_of_simple_model))\n\nrss_of_model_2 = rss(test_data, ['sqft_living', 'sqft_living15'], 'price', model_2_weights)\nprint('RSS of model 2 on test data is: {0}'.format(rss_of_model_2))\n","sub_path":"course-2/week-2-multiple-regression-assignment-2.py","file_name":"week-2-multiple-regression-assignment-2.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"288858053","text":"#!/usr/local/anaconda3/bin/python3.9\nimport re\nimport pandas as pd\nimport numpy as np\nimport sys\nimport os\nimport pwd\nuname = pwd.getpwuid(os.getuid()).pw_name\nsys.path.append('/work/'+uname+ '/project/zlib/')\n\nfrom zutils import get_prev_business_date, get_business_date_list\n\n\nfrom os.path import isfile,join\ndef file_filter(f):\n if f[-4:] in ['.csv'] and not re.match(r'tmp/.*', f):\n return True\n else:\n return False\n\ndef specialstr(maxkey,spp):\n slist = spp.split('.')\n if True: \n plist02 = maxkey.split(spp)\n p0 = plist02[0].split('.')\n p1 = plist02[1].split('.')\n \n p0list = [x for x in p0 if x != '']\n p1list = [x for x in p1 if x != '']\n flat_list = [item for sublist in [p0list[2:], [spp], p1list] for item in sublist]\n paramstr = ','.join(flat_list)\n\n return(paramstr)\n\ndef generate_key(t,maxkey,totaldf,srdict):\n #print(maxkey)\n if re.match(r'.*\\.0\\.2.*$',maxkey):\n paramstr = specialstr(maxkey,'0.2')\n elif re.match(r'.*\\.1\\.2.*$',maxkey):\n paramstr = specialstr(maxkey,'1.2')\n elif re.match(r'.*\\.2\\.4.*$',maxkey):\n paramstr = specialstr(maxkey,'2.4')\n elif re.match(r'.*\\.\\..*',maxkey):\n plist = maxkey.split('.')\n plist.remove('')\n \n params = [ str(round(int(i)/10,1)) if i in ['5'] else i for i in plist]\n \n plist =[ x for x in params if x != '' ] \n paramstr = ','.join(plist[2:])\n \n else:\n plist =[ x for x in maxkey.split('.') if x != '' ] \n paramstr = ','.join(plist[2:])\n\n isstr = ','.join([str(round(z,2)) for z in totaldf.loc[maxkey+'.is',] ] )\n osstr = ','.join([str(round(z,2)) for z in totaldf.loc[maxkey+'.os',] ] )\n finalstr = t + '=[' + paramstr + ']' + '#' + maxkey + ' ' + str(round(srdict[maxkey]/2,2)) + ' is ' + isstr + ';os ' + osstr \n return(finalstr)\n\ndef start_analysis(input_dir,output_dir,index_col,zfix):\n input_dir = '/work/jzhu/output/' + input_dir\n files = [f for f in os.listdir(input_dir) if isfile(join(input_dir, f))]\n csvfiles = list(filter(file_filter,files))\n print(input_dir,output_dir)\n #print(csvfiles)\n os.makedirs(output_dir,exist_ok=True)\n\n\n totaldf = pd.DataFrame()\n \n for f in csvfiles:\n t = f.split('.csv')[0]\n fin = input_dir + f\n print(fin)\n fsize = os.path.getsize(fin)\n if not fsize == 0:\n df = pd.read_csv(fin,sep = '\\s+',names=['sr','ret','vol','dd','txns'])\n totaldf = pd.concat([totaldf, df])\n\n else:\n print('File Szie is Zero:',fin)\n print('total',totaldf)\n\n tickerlist = []\n [ tickerlist.append('.'.join(f.split('.')[0:2])) for f in csvfiles if not '.'.join(f.split('.')[0:2]) in tickerlist] \n print(tickerlist)\n for t in tickerlist:\n if not os.environ['TICKER'] == '' and not t == os.environ['TICKER']:\n continue\n srdict = {}\n tmpdf = 
totaldf[totaldf.index.str.contains(t)].sort_index()\n for i in tmpdf.index:\n mi = i.split('.')[:-1]\n mistr = '.'.join(mi)\n \n if mistr in srdict:\n if os.environ['FILTERTYPE'] == 'sr':\n srdict[mistr] += tmpdf.loc[i,'sr']\n #print('checksr',i,tmpdf.loc[i,'sr'])\n elif os.environ['FILTERTYPE'] == 'dd':\n srdict[mistr] += tmpdf.loc[i,'ret'] / np.abs(tmpdf.loc[i,'dd'])\n else:\n assert(0)\n else:\n if os.environ['FILTERTYPE'] == 'sr':\n srdict[mistr] = tmpdf.loc[i,'sr']\n #print('checksr',i,tmpdf.loc[i,'sr'])\n elif os.environ['FILTERTYPE'] == 'dd':\n srdict[mistr] = tmpdf.loc[i,'ret'] / np.abs(tmpdf.loc[i,'dd'])\n else:\n assert(0)\n\n print(t)\n maxkey = max(srdict,key=lambda key: srdict[key])\n fs = generate_key(t,maxkey,totaldf,srdict)\n del srdict[maxkey]\n print(fs)\n\n maxkey1 = max(srdict,key=lambda key: srdict[key])\n fs1 = generate_key(t,maxkey1,totaldf,srdict)\n del srdict[maxkey1]\n print(fs1)\n\n maxkey2 = max(srdict,key=lambda key: srdict[key])\n fs2 = generate_key(t,maxkey2,totaldf,srdict)\n del srdict[maxkey2]\n print(fs2)\n\n\n maxkey3 = max(srdict,key=lambda key: srdict[key])\n fs3 = generate_key(t,maxkey3,totaldf,srdict)\n del srdict[maxkey3]\n print(fs3)\n\n maxkey4 = max(srdict,key=lambda key: srdict[key])\n fs4 = generate_key(t,maxkey4,totaldf,srdict)\n del srdict[maxkey4]\n print(fs4)\n\n if False:\n fout = output_dir + f\n if re.match(r'^.*\\..*\\.csv',f):\n fout = output_dir + f\n else:\n f_split = f.split('.')\n fout = output_dir + f_split[0] + '.PO.' + f_split[-1] \n print('fin',fin)\n print('fout',fout)\n\n\ndef main():\n import getopt, sys\n try:\n opts, args = getopt.getopt(sys.argv[1:],\"t:f:c:i:k:o:zdv\",[\"index_col=\",\"help\"])\n except getopt.GetoptError as err:\n print(str(err))\n usage()\n sys.exit(2)\n verbose = False\n root_dir = '/work/'+uname+'/'#os.getcwd()\n input_dir = None\n output_dir = None \n index_col = 'date' \n zfix = False\n os.environ['CALTYPE'] = 'XSHG'\n os.environ['DERIVED'] = ''\n os.environ['TICKER'] = ''\n os.environ['FILTERTYPE'] = 'sr' \n\n for o, a in opts:\n if o == \"-v\":\n verbose = True\n elif o in (\"-i\"):\n input_dir = a\n if a in ['iv30','dpi','gex','Index','chfrc']:\n os.environ['DERIVED'] = a\n elif o == ('-o'):\n output_dir = a \n elif o == ('-c'):\n os.environ['CALTYPE'] = a\n elif o == ('-t'):\n os.environ['TICKER'] = a \n elif o == ('-f'):\n os.environ['FILTERTYPE'] = a\n elif o == ('-z'):\n zfix = True \n elif o == ('--index_col') and not os.environ['DERIVED'] == '':\n index_col = a \n\n\n if input_dir is not None:\n if input_dir.find('/')!= -1 :\n id_split = input_dir.split('/')\n print('id_split', id_split[-1] )\n key_str = '/'.join(id_split[-5:-1]) if id_split[-1] == '' else '/'.join(id_split[-2:])\n output_dir = root_dir + '/data/pol/' + key_str + '/' \n else:\n output_dir = root_dir + '/data/pol/' + input_dir + '/' \n input_dir = root_dir + '/input/' + input_dir + '/' \n else:\n print('input_dir is missing')\n\n print(input_dir)\n input_dir = input_dir + '/' \n print('output_dir',output_dir) \n \n start_analysis(input_dir,output_dir,index_col,zfix)\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"getxxx.py","file_name":"getxxx.py","file_ext":"py","file_size_in_byte":6844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"359443653","text":"class Solution(object):\n\n def dfs(self, i):\n if i in self.s:\n return\n elif self.visited[i] == 0:\n self.visited[i] = 1\n self.s.append(i)\n for n in self.d[i]:\n self.dfs(n)\n\n def countComponents(self, 
n, edges):\n if n == 0:\n return 0\n\n if not edges:\n return n\n\n self.visited = [0] * n\n self.d = [[] for i in range(n)]\n c = 0\n for e in edges:\n self.d[e[0]].append(e[1])\n self.d[e[1]].append(e[0])\n\n for i in range(n):\n self.s = []\n self.dfs(i)\n if self.s:\n c += 1\n\n return c\n\n\nn = 5\nedges = [[0, 1], [1, 2], [2, 3], [3, 4]]\n\ntestClass = Solution()\n\nprint(testClass.countComponents(n, edges))\n","sub_path":"323-number-of-connected-components-in-an-undirected-graph/323.py","file_name":"323.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"5470566","text":"d1 = {}\nd2 = {'a':1, 'b': '2'}\nd3 = dict(a=1, b='2')\nd4 = dict([('a', 1), ('b', '2')])\nd5 = dict.fromkeys(['a', 'b'])\nprint(d5)\nd6 = dict.fromkeys(['a', 'b'], 2)\nprint(d6)\n\nd7 = {x: x ** 3 for x in (0, 1, 2, 3)}\nprint(d7)\nprint(d7[3])\ndel d7[1]\nprint(d7)\nexample = 2 in d7\nprint(example)\n\nuser = {\n'name': 'Олег',\n 'email': 'oleg@example.com',\n 'address': {\n 'city': 'Москва',\n 'street': 'Тверская'\n },\n 'hobby': ['рисование', 'пение']\n}\nprint(user['name'], user['address']['city'], user['hobby'])","sub_path":"Lesson2/dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"65405729","text":"#!/usr/bin/env python3\n\"\"\"\nExperimental streaming example. (alpha)\n\nDependencies:\n python 3.7\n\nUsage:\n $ ./grpc_stream.py --api_key --output_path \n\"\"\"\n\nfrom absl import app\nfrom absl import flags\n\nimport grpc_utils\n# NOTE: It will be replaced to Skelter Lab's pip package.\nfrom poodle.tts.server.jimin import compat_google_grpc_pb2\nfrom poodle.tts.server.jimin import compat_google_grpc_pb2_grpc\n\nflags.DEFINE_string('api_url', 'aiq.skelterlabs.com:443', 'AIQ portal address.')\nflags.DEFINE_string('api_key', None, 'AIQ project api key.')\nflags.DEFINE_boolean('insecure', None,\n 'Use plaintext and insecure connection.')\nflags.DEFINE_string('text', '안녕하세요. 
스켈터랩스입니다.',\n                    'Input text to synthesize.')\nflags.DEFINE_string('output_path', None, 'Output wav path.')\nFLAGS = flags.FLAGS\n\n\ndef main(args):\n    del args  # Unused\n\n    channel = grpc_utils.create_channel(\n        FLAGS.api_url, api_key=FLAGS.api_key, insecure=FLAGS.insecure)\n    stub = compat_google_grpc_pb2_grpc.TextToSpeechStub(channel)\n\n    synthesis_input = compat_google_grpc_pb2.SynthesisInput(text=FLAGS.text)\n    voice = compat_google_grpc_pb2.VoiceSelectionParams(\n        language_code='ko-KR', name='KO_KR_WOMAN_2')\n    audio_config = compat_google_grpc_pb2.AudioConfig(\n        audio_encoding='LINEAR16')\n    request = compat_google_grpc_pb2.SynthesizeSpeechRequest(\n        input=synthesis_input, voice=voice, audio_config=audio_config)\n    responses = stub.StreamingSynthesizeSpeech(request)\n    with open(FLAGS.output_path, mode='wb') as output_file:\n        for response in responses:\n            output_file.write(response.audio_content)\n\n\nif __name__ == '__main__':\n    flags.mark_flags_as_required(['output_path'])\n    app.run(main)\n","sub_path":"tts/grpc_stream.py","file_name":"grpc_stream.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"46116737","text":"\"\"\"Preprocess data for model usage\"\"\"\nimport logging\nfrom enum import Enum\nimport os.path\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Embedding\nfrom tensorflow.keras.layers.experimental.preprocessing import TextVectorization\nfrom data.data_store import DataStore\nfrom configuration.data_configuration import DataConfiguration\nfrom data.data_info import PriceDataInfo\nfrom configuration.configuration import TrainConfiguration, HyperParameterConfiguration, hp_cfg_is_cached, \\\n    deserialize_hp_cfg, serialize_hp_cfg, train_cfg_is_cached, deserialize_train_cfg, serialize_train_cfg\n\nlogger = logging.getLogger(\"preprocessor\")\n\n\nclass EventType(Enum):\n    \"\"\"To distinguish between event types for a stock\"\"\"\n\n    PRESS_EVENT = \"PRESS\"\n    NEWS_EVENT = \"NEWS\"\n    NO_EVENT = \"NOEVENT\"\n\n\nclass DatasetType(Enum):\n    \"\"\"Dataset Enum\"\"\"\n\n    TRAIN_DS = \"train\"\n    VAL_DS = \"val\"\n    TEST_DS = \"test\"\n\n\ndef _preprocess_event_df(symbol_df, event_type):\n    if event_type == EventType.NEWS_EVENT:\n        symbol_df[\"date\"] = pd.to_datetime(symbol_df[\"publishedDate\"])\n        symbol_df.drop([\"publishedDate\", \"site\", \"url\"], axis=1, inplace=True)\n    else:\n        symbol_df[\"date\"] = pd.to_datetime(symbol_df[\"date\"])\n\n    symbol_df[\"date\"] = symbol_df[\"date\"].apply(lambda x: x.date())\n    symbol_df[\"event_type\"] = event_type.value\n    symbol_df[\"event_text\"] = symbol_df[\"title\"] + \" \" + symbol_df[\"text\"]\n\n    return symbol_df.drop([\"title\", \"text\"], axis=1)\n\n\nclass Preprocessor:\n    \"\"\"Preprocess data for model usage\"\"\"\n\n    # pylint: disable=too-many-instance-attributes\n    def __init__(\n        self,\n        data_store: DataStore,\n        data_cfg: DataConfiguration,\n        train_cfg: TrainConfiguration,\n        hp_cfg: HyperParameterConfiguration\n    ):\n        self.data_store = data_store\n        self.data_cfg = data_cfg\n\n        assert (\n            len(set(self.data_cfg.feedback_metrics) - set(PriceDataInfo.fields)) == 0\n        ), \"API data price fields do not contain all fields that are configured as feedback metrics\"\n\n        self.train_cfg = train_cfg\n        self.hp_cfg = hp_cfg\n\n        # advanced caching mechanism needs to save new configurations\n        
self._old_preprocessing_result_can_be_reused = self._check_reusability_of_old_preprocessing()\n logger.info(\"Preprocessing result reusable: \" + str(self._old_preprocessing_result_can_be_reused))\n serialize_hp_cfg(self.hp_cfg)\n serialize_train_cfg(self.train_cfg)\n\n self.date_df = self._build_date_dataframe()\n\n # Predefine all dataframes for linter._.\n self._events_train_df = pd.DataFrame()\n self._gt_train_df = pd.DataFrame()\n self._events_val_df = pd.DataFrame()\n self._gt_val_df = pd.DataFrame()\n self._events_test_df = pd.DataFrame()\n self._gt_test_df = pd.DataFrame()\n\n self._vectorizer = TextVectorization(\n max_tokens=self.data_cfg.stock_news_limit, output_sequence_length=self.MAX_EVENT_LENGTH\n )\n self.embedding_model: Sequential\n self._prepare_word_embedding()\n\n NOTHING_HAPPENED_TEXT = \"Nothing happened\"\n EMBEDDING_DIM = 300\n MAX_EVENT_LENGTH = 50\n PATH_TO_GLOVE_FILE = \"data/assets/glove.6B.300d.txt\"\n\n def build_events_data_with_gt(self):\n \"\"\"builds event data\"\"\"\n\n # check cached events_df\n if self._old_preprocessing_result_can_be_reused:\n return\n\n # vertically concatenate all symbols and their events\n events_df = pd.concat(\n [self._build_df_for_symbol(symbol) for symbol in self.data_cfg.symbols]\n )\n\n # join event_title & event_text columns\n events_df[\"event\"] = events_df[\"event_type\"] + \" \" + events_df[\"event_text\"]\n events_df = events_df.drop([\"event_type\", \"event_text\"], axis=1)\n events_df = events_df.astype({\"event\": object})\n\n # We have to incorporate the feedback at the end of the event embedding vector\n # pylint: disable=unnecessary-lambda\n events_df[\"event\"] = events_df.apply(\n lambda row: self._create_embedding_with_feedback(row), axis=1\n )\n\n # We incorporated the feedback at the end of the event embedding matrix\n # so we don't need the single metrics anymore\n events_df = events_df.drop(self.data_cfg.feedback_metrics, axis=1)\n\n # build multi-index dataframe per date and symbol to later generate tensors\n # with the right shape easily\n #\n # The grouping with gt_trend is unnecessary here, because it holds the same grouping\n # information as 'date' and 'symbol' combined. 
We have to list it here in order\n # to copy it over to the new events_df dataframe\n events_df = events_df.groupby([\"date\", \"symbol\", \"gt_trend\"])[\"event\"].apply(list).reset_index()\n events_df.set_index([\"date\", \"symbol\"], inplace=True)\n\n # events_df.index = events_df.index.set_levels(\n # events_df.index.levels[0], level=0\n # )\n\n self._set_train_val_test_split(events_df)\n\n def get_val_ds(self):\n \"\"\"windowed tensorflow validation dataset\"\"\"\n return self._get_tf_dataset(self._events_val_df, self._gt_val_df, DatasetType.VAL_DS)\n\n def get_train_ds(self):\n \"\"\"windowed tensorflow training dataset\"\"\"\n return self._get_tf_dataset(self._events_train_df, self._gt_train_df, DatasetType.TRAIN_DS)\n\n def get_test_ds(self):\n \"\"\"windowed tensorflow test dataset\"\"\"\n return self._get_tf_dataset(self._events_test_df, self._gt_test_df, DatasetType.TEST_DS)\n\n def _prepare_word_embedding(self):\n if self._old_preprocessing_result_can_be_reused:\n return\n\n self._set_vectorizer()\n\n vocab = self._vectorizer.get_vocabulary()\n num_tokens = len(vocab) + 2\n\n embedding_matrix = self._build_embedding_matrix(vocab)\n embedding = Embedding(\n num_tokens,\n self.EMBEDDING_DIM,\n input_length=self.MAX_EVENT_LENGTH,\n embeddings_initializer=keras.initializers.Constant(embedding_matrix),\n trainable=False,\n )\n\n self.embedding_model = Sequential()\n self.embedding_model.add(embedding)\n self.embedding_model.compile()\n\n def _get_tf_dataset(self, events_df, gt_df, ds_type: DatasetType):\n \"\"\"Return windowed dataset based on events_df and ground truth\"\"\"\n dataset_path = f'data/datasets/{ds_type.value}'\n\n if self._old_preprocessing_result_can_be_reused:\n return tf.data.experimental.load(dataset_path)\n\n sliding_window_length = self.hp_cfg.sliding_window_size\n\n dates_count = len(events_df.groupby(level=0))\n symbols_count = len(events_df.groupby(level=1))\n\n assert sliding_window_length < dates_count, (\n f\"sliding window length ({sliding_window_length}) \"\n f\"does exceed date count ({dates_count}) in dataset.\"\n )\n\n # build the input np matrix\n\n np_stock_matrix = events_df.values.reshape(dates_count, symbols_count, 1)\n\n events_counts = []\n\n def add_to_events_counts(list_input):\n events_counts.append(len(list_input[0]))\n\n np.apply_along_axis(\n add_to_events_counts, axis=2, arr=np_stock_matrix\n )\n\n max_event_count = max(events_counts)\n\n # timeseries_dataset_from_array only takes np arrays with defined shape.\n # The third to last dimension of the np stock array (events count) is padded\n # to match the longest element in this dimension\n\n def array_cast(list_input):\n unfold_event_list = np.asarray(list_input[0])\n return np.pad(\n unfold_event_list,\n (\n (0, max_event_count - unfold_event_list.shape[0]),\n (0, 0),\n (0, 0),\n ),\n )\n\n np_stock_matrix = np.apply_along_axis(array_cast, axis=2, arr=np_stock_matrix)\n\n # build the gt np matrix\n np_gt_trend_matrix = gt_df.values.reshape(dates_count, symbols_count, 1)\n\n # since the 'timeseries_dataset_from_array' documentation states:\n #\n # \"targets[i] should be the target corresponding to the window that starts at index i\"\n #\n # we have to 'shift' the gt_tensor #{sliding_window_length} time steps 'back in time',\n # so that target[1] yields the gt for the first window, which otherwise would be at\n # target[{sliding_window_length}]\n np_gt_trend_matrix = np.roll(\n np_gt_trend_matrix, shift=-(sliding_window_length - 1), axis=0\n )\n\n tf_ds = 
tf.keras.preprocessing.timeseries_dataset_from_array(\n data=np_stock_matrix.astype('float16'),\n targets=np_gt_trend_matrix.astype('float16'),\n sequence_length=sliding_window_length,\n sequence_stride=1,\n batch_size=self.train_cfg.batch_size,\n )\n\n tf_ds = tf_ds.cache().prefetch(tf.data.AUTOTUNE)\n # cache dataset\n tf.data.experimental.save(tf_ds, dataset_path)\n\n return tf_ds\n\n def _build_date_dataframe(self):\n dates = pd.date_range(self.data_cfg.start_str, self.data_cfg.end_str, freq=\"D\")\n date_df = pd.DataFrame({\"date\": dates})\n date_df[\"date\"] = date_df[\"date\"].apply(lambda x: x.date())\n return date_df\n\n def _build_df_for_symbol(self, symbol):\n\n symbol_event_df = self._build_events_df_for_symbol(symbol)\n symbol_feedback_and_gt_df = self._build_price_gt_df_for_symbol(symbol)\n\n symbol_df = pd.merge(self.date_df, symbol_event_df, on=\"date\", how=\"left\")\n symbol_df = pd.merge(symbol_df, symbol_feedback_and_gt_df, on=\"date\")\n\n symbol_df[\"event_type\"] = symbol_df[\"event_type\"].replace(\n np.nan, EventType.NO_EVENT.value\n )\n\n symbol_df[\"event_text\"] = symbol_df[\"event_text\"].replace(\n np.nan, self.NOTHING_HAPPENED_TEXT\n )\n\n symbol_df[\"symbol\"] = symbol_df[\"symbol\"].replace(np.nan, symbol)\n\n return symbol_df\n\n def _build_events_df_for_symbol(self, symbol):\n symbol_press_df = self._get_symbol_press_df(symbol)\n symbol_news_df = self._get_symbol_news_df(symbol)\n\n return pd.concat([symbol_press_df, symbol_news_df], axis=0)\n\n def _get_symbol_press_df(self, symbol):\n symbol_press_df = pd.DataFrame.from_dict(\n self.data_store.get_press_release_data(symbol)\n )\n\n return _preprocess_event_df(symbol_press_df, EventType.PRESS_EVENT)\n\n def _get_symbol_news_df(self, symbol):\n symbol_news_df = pd.DataFrame.from_dict(\n self.data_store.get_stock_news_data(symbol)\n )\n\n return _preprocess_event_df(symbol_news_df, EventType.NEWS_EVENT)\n\n def _build_price_gt_df_for_symbol(self, symbol):\n symbol_price_df = pd.DataFrame.from_dict(\n self.data_store.get_price_data(symbol),\n )\n\n symbol_price_df = symbol_price_df.astype(\n {\n \"date\": str,\n \"low\": float,\n \"high\": float,\n \"close\": float,\n \"open\": float,\n \"vwap\": float,\n }\n )\n\n symbol_price_df[\"date\"] = pd.to_datetime(\n symbol_price_df[\"date\"], format=self.data_cfg.DATE_FORMAT\n ).apply(lambda x: x.date())\n symbol_price_df = pd.merge(\n self.date_df, symbol_price_df, on=\"date\", how=\"left\"\n ).ffill()\n\n symbol_feedback_df = symbol_price_df.drop([\"date\"], axis=1)\n\n indicator_next_day = symbol_feedback_df.shift(-1).replace(np.nan, 0)\n indicator_current_day = symbol_feedback_df\n symbol_feedback_df = (indicator_next_day - indicator_current_day) / indicator_current_day\n\n symbol_feedback_df = symbol_feedback_df.join(symbol_price_df[\"date\"])\n\n # duplicate symbols gt metric column with dedicated gt label\n symbol_feedback_df[\"gt_trend\"] = symbol_feedback_df[\n self.data_cfg.gt_metric.value\n ]\n\n # return all fields which are choosen for feedback metrics and gt\n return symbol_feedback_df.drop(\n [\n field\n for field in PriceDataInfo.fields\n if field != \"date\"\n and field != \"gt_trend\"\n and field not in self.data_cfg.feedback_metrics\n ],\n axis=1,\n )\n\n def _set_train_val_test_split(self, events_df):\n\n actual_val_split = 1 - (self.train_cfg.val_split + self.train_cfg.test_split)\n actual_test_split = 1 - self.train_cfg.test_split\n\n # since np.split does not take hierarchical indexing into account\n # but rather flattens the index, we 
have to make sure not to split\n # in the middle of a day\n dates_count = len(events_df.index.levels[0])\n symbols_count = len(events_df.index.levels[1])\n\n dates_val_split = int(dates_count * actual_val_split) * symbols_count\n dates_test_split = int(dates_count * actual_test_split) * symbols_count\n\n # pylint: disable=unbalanced-tuple-unpacking\n events_train_df, events_val_df, events_test_df = np.split(\n events_df,\n [\n dates_val_split,\n dates_test_split,\n ],\n )\n\n self._events_train_df = events_train_df[\"event\"]\n self._gt_train_df = events_train_df[\"gt_trend\"]\n\n self._events_val_df = events_val_df[\"event\"]\n self._gt_val_df = events_val_df[\"gt_trend\"]\n\n self._events_test_df = events_test_df[\"event\"]\n self._gt_test_df = events_test_df[\"gt_trend\"]\n\n def _get_event_texts_for_symbol(self, symbol):\n press_texts = self._get_symbol_press_df(symbol)[\"event_text\"]\n press_texts = EventType.PRESS_EVENT.value + \" \" + press_texts\n news_texts = self._get_symbol_news_df(symbol)[\"event_text\"]\n news_texts = EventType.NEWS_EVENT.value + \" \" + news_texts\n\n return pd.concat([press_texts, news_texts], axis=0)\n\n def _create_embedding_with_feedback(self, events_df_row):\n event_string = events_df_row[\"event\"]\n event_vector = self._vectorizer([event_string])\n event_embedding = self.embedding_model.predict(event_vector)\n\n # event embedding comes in the shape [1,50,300], we want the shape [50, 300],\n # which represents one sentence much better.\n event_embedding = np.squeeze(event_embedding)\n\n # we have to append the events feedback to the event embedding keep the dataset\n # shape working therefore each feedback metric has to be expressed with a\n # (300) vector.\n feedback_row = events_df_row[self.data_cfg.feedback_metrics].values\n new_feedback_shape = (len(self.data_cfg.feedback_metrics), self.EMBEDDING_DIM)\n feedback_row = np.broadcast_to(\n np.expand_dims(feedback_row, axis=1), new_feedback_shape\n )\n\n return np.concatenate((event_embedding, feedback_row), axis=0)\n\n def _set_vectorizer(self):\n all_event_texts = pd.concat(\n [\n self._get_event_texts_for_symbol(symbol)\n for symbol in self.data_cfg.symbols\n ]\n )\n all_event_texts = all_event_texts.append(\n pd.Series(EventType.NO_EVENT.value + \" \" + self.NOTHING_HAPPENED_TEXT)\n )\n\n self._vectorizer.adapt(\n tf.data.Dataset.from_tensor_slices(all_event_texts.values).batch(128)\n )\n\n def _build_embedding_matrix(self, vocab):\n # setup word index\n word_index = dict(zip(vocab, range(len(vocab))))\n\n # setup embedding index\n embeddings_index = {}\n with open(self.PATH_TO_GLOVE_FILE, encoding=\"utf8\") as file:\n for line in file:\n word, coefs = line.split(maxsplit=1)\n coefs = np.fromstring(coefs, \"f\", sep=\" \")\n embeddings_index[word] = coefs\n\n hits = 0\n misses = 0\n # construct embedding matrix\n missed_words = []\n num_tokens = len(vocab) + 2\n embedding_matrix = np.zeros((num_tokens, self.EMBEDDING_DIM))\n for word, i in word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n # Words not found in embedding index will be all-zeros.\n # This includes the representation for \"padding\" and \"OOV\"\n embedding_matrix[i] = embedding_vector\n hits += 1\n else:\n missed_words.append(word)\n misses += 1\n logger.info(\"Converted %d words (%d misses)\" % (hits, misses))\n\n return embedding_matrix\n\n def _check_reusability_of_old_preprocessing(self):\n has_been_cached = all([os.path.isdir(t.value) for t in DatasetType])\n new_configs = 
self._hp_cfg_has_changed() or self._train_cfg_has_changed()\n\n return has_been_cached and not new_configs and self.data_store.old_data_can_be_reused\n\n def _hp_cfg_has_changed(self):\n if hp_cfg_is_cached():\n old_cfg = deserialize_hp_cfg()\n return old_cfg != self.hp_cfg\n return True\n\n def _train_cfg_has_changed(self):\n if train_cfg_is_cached():\n old_cfg = deserialize_train_cfg()\n return old_cfg != self.train_cfg\n return True\n","sub_path":"data/preprocesser.py","file_name":"preprocesser.py","file_ext":"py","file_size_in_byte":17590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"323789381","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# BiblioJAM (Versión 2.0) - 16/04/2011 - CeibalJAM! - Uruguay\n# JAMButton.py por: Flavio Danesse fdanesse@gmail.com\n# https://sites.google.com/site/flaviodanesse/\n# https://sites.google.com/site/sugaractivities/\n# http://codigosdeejemplo.blogspot.com/\n\nfrom .JAMLabel import JAMLabel\nfrom . import JAMGlobals as VG\nimport pygame\nimport gc\nimport sys\nfrom pygame.locals import *\ngc.enable()\n\n\nclass JAMButton(pygame.sprite.Sprite):\n def __init__(self, texto, imagen, tipo=\"rectangulo\"):\n pygame.sprite.Sprite.__init__(self)\n\n self.image = None\n self.rect = None\n self.posicion = (0, 0)\n self.select = False\n self.sonido_select = VG.get_sound_select()\n self.callback = None\n self.alineacion = \"centro\"\n self.tipo = tipo\n\n COLORCARA, COLORBAS, COLORBOR, GROSORBOR, DETALLE, ESPESOR = VG.get_default_jambutton_values()\n\n self.base = {\"tamanio\": None, \"colorbas\": COLORBAS, \"colorbor\": COLORBOR,\n \"grosorbor\": GROSORBOR, \"detalle\": DETALLE, \"espesor\": ESPESOR}\n self.cara = {\"tamanio\": None, \"color\": COLORCARA}\n self.borde_label = {\"grosor\": 0, \"color\": VG.get_negro()}\n\n self.etiqueta_unselect = JAMLabel(texto)\n self.etiqueta_unselect.set_contenedor(colorbas=self.cara[\"color\"])\n self.etiqueta_select = JAMLabel(texto)\n self.etiqueta_select.set_contenedor(colorbas=self.base[\"colorbor\"])\n\n self.JAMObjects = {\"JAMLabelunselect\": self.etiqueta_unselect, \"JAMLabelselect\": self.etiqueta_select,\n \"Base\": self.base, \"Cara\": self.cara, \"Borde\": self.borde_label}\n\n self.imagen_cara_unselect = None\n self.imagen_cara_select = None\n self.final_unselect = None\n self.final_select = None\n\n self.Reconstruye_JAMButton([\"texto\"])\n\n # ---------- SETEOS Y GET GENERALES ---------- #\n def get_text(self):\n ''' Devuelve la cadena de Texto en JAMLabel. '''\n return self.etiqueta_unselect.get_text()\n\n def set_tipo(self, tipo):\n ''' Setea el tipo de botón \"elipse\" o \"rectangulo\". '''\n if tipo and tipo != self.tipo and (tipo == \"elipse\" or tipo == \"rectangulo\"):\n self.tipo = tipo\n self.Reconstruye_JAMButton([\"texto\"])\n\n def get_posicion(self):\n ''' Devuelve la posición actual. '''\n return self.posicion\n\n # ---------- SETEOS SOBRE LA ETIQUETA ---------- #\n def set_text(self, tipo=None, tamanio=None, color=None, texto=None):\n ''' Setea el Texto en JAMLabel. '''\n self.etiqueta_unselect.set_text(\n tipo=tipo, tamanio=tamanio, color=color, texto=texto)\n self.etiqueta_select.set_text(\n tipo=tipo, tamanio=tamanio, color=color, texto=texto)\n self.Reconstruye_JAMButton([\"texto\"])\n\n def set_font_from_file(self, direccion_archivo, tamanio=None):\n ''' Setea la fuente desde un archivo en JAMLabel. 
'''\n self.etiqueta_unselect.set_font_from_file(direccion_archivo, tamanio)\n self.etiqueta_select.set_font_from_file(direccion_archivo, tamanio)\n self.Reconstruye_JAMButton([\"texto\"])\n\n def set_imagen(self, origen=None, tamanio=None):\n ''' Setea el Imagen en JAMLabel. '''\n self.etiqueta_unselect.set_imagen(origen=origen, tamanio=tamanio)\n self.etiqueta_select.set_imagen(origen=origen, tamanio=tamanio)\n self.Reconstruye_JAMButton([\"imagen\"])\n # ---------- SETEOS SOBRE LA ETIQUETA ---------- #\n\n # ---------- SETEOS SOBRE LA BASE ---------- #\n def set_tamanios(self, tamanio=None, grosorbor=None, detalle=None, espesor=None):\n cambios = False\n # desactivar tamaños\n if tamanio == -1 and tamanio != None:\n tamanio = None\n self.base[\"tamanio\"] = None\n cambios = True\n if grosorbor == -1 and grosorbor != None:\n grosorbor = None\n self.base[\"grosorbor\"] = 1\n cambios = True\n if detalle == -1 and detalle != None:\n detalle = None\n self.base[\"detalle\"] = 1\n cambios = True\n if espesor == -1 and espesor != None:\n espesor = None\n self.base[\"espesor\"] = 1\n cambios = True\n\n # establecer tamaños\n if tamanio and tamanio != self.base[\"tamanio\"]:\n self.base[\"tamanio\"] = tamanio\n cambios = True\n if grosorbor and grosorbor != self.base[\"grosorbor\"]:\n self.base[\"grosorbor\"] = grosorbor\n cambios = True\n if detalle and detalle != self.base[\"detalle\"]:\n self.base[\"detalle\"] = detalle\n cambios = True\n if espesor and espesor != self.base[\"espesor\"]:\n self.base[\"espesor\"] = espesor\n cambios = True\n\n if cambios:\n self.Reconstruye_JAMButton([\"tamanio\"])\n\n def set_colores(self, colorbas=None, colorbor=None, colorcara=None):\n ''' Setea los colores del botón y la etiqueta. '''\n cambios = False\n if colorbas and colorbas != self.base[\"colorbas\"]:\n self.base[\"colorbas\"] = colorbas\n cambios = True\n if colorbor and colorbor != self.base[\"colorbor\"]:\n self.base[\"colorbor\"] = colorbor\n cambios = True\n if colorcara and colorcara != self.cara[\"color\"]:\n self.cara[\"color\"] = colorcara\n cambios = True\n\n if cambios:\n # , grosor=None, colorbor=None)\n self.etiqueta_unselect.set_contenedor(colorbas=self.cara[\"color\"])\n # , grosor=None, colorbor=None)\n self.etiqueta_select.set_contenedor(colorbas=self.base[\"colorbor\"])\n self.Reconstruye_JAMButton([\"colores\"])\n\n def set_borde_label(self, grosor=None, color=None):\n ''' Agrega o quita un borde sobre la cara de JAMButton. '''\n cambios = False\n if grosor < 1 and grosor != self.borde_label[\"grosor\"]:\n grosor = None\n cambios = True\n if grosor and grosor != self.borde_label[\"grosor\"]:\n self.borde_label[\"grosor\"] = grosor\n cambios = True\n if color and color != self.borde_label[\"color\"]:\n self.borde_label[\"color\"] = color\n cambios = True\n\n if cambios:\n self.Reconstruye_JAMButton([\"borde\"])\n\n def set_alineacion_label(self, alineacion):\n ''' Setea la alineacion de JAMLabel sobre la cara de JAMButton. '''\n if alineacion == \"centro\" or alineacion == \"izquierda\" or alineacion == \"derecha\":\n self.alineacion = alineacion\n self.Reconstruye_JAMButton([\"alineacion\"])\n # ---------- SETEOS SOBRE LA BASE ---------- #\n\n def connect(self, callback=None, sonido_select=None):\n '''Conecta el botón a una función y un sonido para reproducir al hacer click sobre JAMButton. 
'''\n self.callback = callback\n # debes pasar una referencia al audio ya cargado para no cargarlo cada vez que creas un botón\n self.sonido_select = sonido_select\n\n def set_posicion(self, punto=None):\n ''' Setea la posición de JAMButton en la pantalla. '''\n try:\n if punto:\n self.rect.x, self.rect.y = (punto)\n self.posicion = punto\n except:\n pass\n\n # ------------- GETS ------------------------\n def get_tamanio(self):\n return (self.rect.w, self.rect.h)\n\n # ----------- CONSTRUCCION -------------------\n def Reconstruye_JAMButton(self, cambios):\n ''' Cada vez que se setea algo, se reconstruye JAMButton con sus nuevos valores. '''\n if \"tamanio\" in cambios:\n # reconstruye la cara en base a la etiqueta\n self.cara[\"tamanio\"] = None\n self.imagen_cara_unselect, self.imagen_cara_select = self.construye_cara()\n\n # verifica tamaño minimo para la base según la cara reconstruida\n anchominimo, altominimo = self.get_minimo_tamanio_base()\n if not self.base[\"tamanio\"]:\n self.base[\"tamanio\"] = (anchominimo, altominimo)\n ancho, alto = self.base[\"tamanio\"]\n if anchominimo > ancho:\n ancho = anchominimo\n if altominimo > alto:\n alto = altominimo\n\n # Establece los nuevos tamaños\n self.base[\"tamanio\"] = (ancho, alto)\n self.cara[\"tamanio\"] = self.get_tamanio_cara_recalculado()\n\n # pegar la etiqueta sobre la cara (¿Alineaciones?)\n self.imagen_cara_unselect, self.imagen_cara_select = self.construye_cara()\n # pegar la cara sobre la base\n self.final_unselect, self.final_select = self.construye_boton()\n\n self.image = self.final_unselect\n self.rect = self.image.get_rect()\n\n self.set_posicion(self.posicion) # seteo automático de posición\n\n def get_minimo_tamanio_base(self):\n ''' Devuelve el tamaño mínimo que puede tener la base del botón. '''\n x = self.base[\"grosorbor\"] + int(self.base[\"espesor\"]/3)\n ancho = x + self.cara[\"tamanio\"][0] + \\\n self.base[\"espesor\"] + self.base[\"grosorbor\"]\n y = self.base[\"grosorbor\"] + int(self.base[\"espesor\"]/3)\n alto = y + self.cara[\"tamanio\"][1] + \\\n self.base[\"espesor\"] + self.base[\"grosorbor\"]\n return (ancho, alto)\n\n def get_tamanio_cara_recalculado(self):\n ''' Devuelve el tamaño que debe tener la cara luego de seteados los tamaños del JAMButton. '''\n tamanio = (0, 0)\n if self.tipo == \"elipse\":\n (xx, yy, ss, zz) = self.etiqueta_unselect.rect\n x = self.base[\"grosorbor\"] + int(self.base[\"espesor\"]/3)+zz/2\n ancho = x + self.base[\"espesor\"] + self.base[\"grosorbor\"]+zz/2\n y = self.base[\"grosorbor\"] + int(self.base[\"espesor\"]/3)+zz/2\n alto = y + self.base[\"espesor\"] + self.base[\"grosorbor\"]+zz/2\n a, h = self.base[\"tamanio\"]\n tamanio = (a - ancho, h - alto)\n else:\n x = self.base[\"grosorbor\"] + int(self.base[\"espesor\"]/3)\n ancho = x + self.base[\"espesor\"] + self.base[\"grosorbor\"]\n y = self.base[\"grosorbor\"] + int(self.base[\"espesor\"]/3)\n alto = y + self.base[\"espesor\"] + self.base[\"grosorbor\"]\n a, h = self.base[\"tamanio\"]\n tamanio = (a - ancho, h - alto)\n return tamanio\n\n def construye_cara(self):\n ''' Crea la cara del botón y pega centrado en ella el JAMLabel. 
'''\n unselect, select = (None, None)\n if self.tipo == \"elipse\":\n w, h = (0, 0)\n # toma tamaño de etiqueta como referencia\n if not self.cara[\"tamanio\"]:\n w = self.etiqueta_unselect.rect.w + \\\n self.base[\"detalle\"]+self.etiqueta_unselect.rect.h\n h = self.etiqueta_unselect.rect.h + \\\n self.base[\"detalle\"]+self.etiqueta_unselect.rect.h\n self.cara[\"tamanio\"] = (w, h)\n # la cara nunca puede ser menor que la etiqueta pero si mayor\n if self.cara[\"tamanio\"] and self.cara[\"tamanio\"][0] < self.etiqueta_unselect.rect.w+self.base[\"detalle\"]+self.etiqueta_unselect.rect.h:\n w = self.etiqueta_unselect.rect.w + \\\n self.base[\"detalle\"]+self.etiqueta_unselect.rect.h\n self.cara[\"tamanio\"] = (w, h)\n if self.cara[\"tamanio\"] and self.cara[\"tamanio\"][1] < self.etiqueta_unselect.rect.h+self.base[\"detalle\"]+self.etiqueta_unselect.rect.h:\n h = self.etiqueta_unselect.rect.h + \\\n self.base[\"detalle\"]+self.etiqueta_unselect.rect.h\n self.cara[\"tamanio\"] = (w, h)\n unselect = VG.get_Elipse(self.cara[\"color\"], self.cara[\"tamanio\"])\n select = VG.get_Elipse(self.base[\"colorbor\"], self.cara[\"tamanio\"])\n\n # alineación desabilitada por bug\n self.alineacion = \"centro\"\n unselect = VG.pegar_imagenes_centradas(\n self.etiqueta_unselect.image, unselect)\n select = VG.pegar_imagenes_centradas(\n self.etiqueta_select.image, select)\n else:\n w, h = (0, 0)\n # toma tamaño de etiqueta como referencia\n if not self.cara[\"tamanio\"]:\n w = self.etiqueta_unselect.rect[2]+self.base[\"detalle\"]\n h = self.etiqueta_unselect.rect[3]+self.base[\"detalle\"]\n self.cara[\"tamanio\"] = (w, h)\n # la cara nunca puede ser menor que la etiqueta pero si mayor\n if self.cara[\"tamanio\"] and self.cara[\"tamanio\"][0] < self.etiqueta_unselect.rect[2]+self.base[\"detalle\"]:\n w = self.etiqueta_unselect.rect[2]+self.base[\"detalle\"]\n self.cara[\"tamanio\"] = (w, h)\n if self.cara[\"tamanio\"] and self.cara[\"tamanio\"][1] < self.etiqueta_unselect.rect[3]+self.base[\"detalle\"]:\n h = self.etiqueta_unselect.rect[3]+self.base[\"detalle\"]\n self.cara[\"tamanio\"] = (w, h)\n unselect = VG.get_Rectangulo(\n self.cara[\"color\"], self.cara[\"tamanio\"])\n select = VG.get_Rectangulo(\n self.base[\"colorbor\"], self.cara[\"tamanio\"])\n\n if self.alineacion == \"centro\":\n unselect = VG.pegar_imagenes_centradas(\n self.etiqueta_unselect.image, unselect)\n select = VG.pegar_imagenes_centradas(\n self.etiqueta_select.image, select)\n elif self.alineacion == \"izquierda\":\n unselect = VG.pegar_imagenes_alineado_izquierda(\n self.etiqueta_unselect.image, unselect)\n select = VG.pegar_imagenes_alineado_izquierda(\n self.etiqueta_select.image, select)\n elif self.alineacion == \"derecha\":\n unselect = VG.pegar_imagenes_alineado_derecha(\n self.etiqueta_unselect.image, unselect)\n select = VG.pegar_imagenes_alineado_derecha(\n self.etiqueta_select.image, select)\n else:\n self.alineacion = \"centro\"\n unselect = VG.pegar_imagenes_centradas(\n self.etiqueta_unselect.image, unselect)\n select = VG.pegar_imagenes_centradas(\n self.etiqueta_select.image, select)\n\n if self.borde_label[\"grosor\"] > 1 and self.borde_label[\"grosor\"] != None:\n if not self.borde_label[\"color\"]:\n self.borde_label[\"color\"] = VG.get_negro()\n if self.tipo == \"elipse\":\n # desabilitado por bug\n #unselect= VG.get_my_surface_whit_elipse_border(unselect, self.borde_label[\"color\"], self.borde_label[\"grosor\"])\n #select= VG.get_my_surface_whit_elipse_border(select, self.borde_label[\"color\"], 
self.borde_label[\"grosor\"])\n pass\n else:\n unselect = VG.get_my_surface_whit_border(\n unselect, self.borde_label[\"color\"], self.borde_label[\"grosor\"])\n select = VG.get_my_surface_whit_border(\n select, self.borde_label[\"color\"], self.borde_label[\"grosor\"])\n return unselect, select\n\n def construye_boton(self):\n ''' Construye las imagenes finales del botón. '''\n unselect = None\n select = None\n if self.tipo == \"elipse\":\n x = self.base[\"grosorbor\"] + int(self.base[\"espesor\"]/3)\n ancho = x + self.cara[\"tamanio\"][0] + \\\n self.base[\"espesor\"] + self.base[\"grosorbor\"]\n y = self.base[\"grosorbor\"] + int(self.base[\"espesor\"]/3)\n alto = y + self.cara[\"tamanio\"][1] + \\\n self.base[\"espesor\"] + self.base[\"grosorbor\"]\n self.base[\"tamanio\"] = (ancho, alto)\n unselect = VG.get_Elipse(\n self.base[\"colorbas\"], self.base[\"tamanio\"])\n unselect = VG.get_my_surface_whit_elipse_border(\n unselect, self.base[\"colorbor\"], self.base[\"grosorbor\"])\n select = VG.get_Elipse(self.base[\"colorbas\"], self.base[\"tamanio\"])\n select = VG.get_my_surface_whit_elipse_border(\n select, self.base[\"colorbor\"], self.base[\"grosorbor\"])\n else:\n x = self.base[\"grosorbor\"] + int(self.base[\"espesor\"]/3)\n ancho = x + self.cara[\"tamanio\"][0] + \\\n self.base[\"espesor\"] + self.base[\"grosorbor\"]\n y = self.base[\"grosorbor\"] + int(self.base[\"espesor\"]/3)\n alto = y + self.cara[\"tamanio\"][1] + \\\n self.base[\"espesor\"] + self.base[\"grosorbor\"]\n self.base[\"tamanio\"] = (ancho, alto)\n unselect = VG.get_Rectangulo(\n self.base[\"colorbas\"], self.base[\"tamanio\"])\n unselect = VG.get_my_surface_whit_border(\n unselect, self.base[\"colorbor\"], self.base[\"grosorbor\"])\n select = VG.get_Rectangulo(\n self.base[\"colorbas\"], self.base[\"tamanio\"])\n select = VG.get_my_surface_whit_border(\n select, self.base[\"colorbor\"], self.base[\"grosorbor\"])\n\n unselect.blit(self.imagen_cara_unselect, (x, y))\n select.blit(self.imagen_cara_select, (x, y))\n return unselect, select\n # -------------------- FIN DE METODOS DE CONSTRUCCION -------------------- #\n\n # -------------------- INICIO DE METODOS INTERNOS AUTOMÁTICOS -------------------- #\n def play_select(self):\n # reproduce un sonido cuando pasas el mouse sobre el botón\n if self.sonido_select:\n self.sonido_select.play()\n\n def update(self):\n # responde a los eventos del mouse sobre el sprite\n '''\n posicion = pygame.mouse.get_pos()\n # Selecciona el botón cuando el mouse pasa encima de él\n if self.rect.collidepoint(posicion):\n if self.select == False:\n self.play_select()\n self.image = self.final_select.copy()\n self.select = True\n\n if pygame.event.get(pygame.MOUSEBUTTONDOWN):\n if self.callback:\n return self.callback(self) # modificación 29 de mayo el boton se pasa a si mismo.\n else:\n if self.select == True:\n self.image = self.final_unselect.copy()\n self.select = False'''\n\n ''' Reimplementación para Compatibilidad en Plug de gtk y mejora en rendimiento:\n\t\tLos eventos se capturan por su tipo, se manejan y si es necesario se republican para habilitar la captura\n\t\tde los mismos por parte de otros controles.'''\n eventos_republicar = []\n eventos = pygame.event.get(pygame.MOUSEBUTTONDOWN)\n for event in eventos:\n posicion = event.pos\n if self.rect.collidepoint(posicion):\n # Si el mouse está presionado sobre el botón.\n if self.callback:\n # y si además hay callback para esta acción.\n pygame.event.clear()\n return self.callback(self)\n else:\n # Si el mouse no está 
presionado sobre el botón.\n if not event in eventos_republicar:\n eventos_republicar.append(event)\n\n eventos = pygame.event.get(pygame.MOUSEMOTION)\n for event in eventos:\n posicion = event.pos\n if self.rect.collidepoint(posicion):\n # Si el mouse está sobre el botón.\n if self.select == False:\n self.play_select()\n self.image = self.final_select\n self.select = True\n else:\n # Si el mouse no está sobre el botón.\n if self.select == True:\n self.image = self.final_unselect\n self.select = False\n if not event in eventos_republicar:\n eventos_republicar.append(event)\n\n for event in eventos_republicar:\n # Se republican todos los eventos que este control no debe manejar.\n pygame.event.post(event)\n\n def Describe(self):\n ''' Describe la Estructura de Este Control. '''\n estructura = '''\n\t\tEstructura JAMButton:\n\t\t\tJAMObjects:\n\t\t\t\tJAMLabelunselect\n\t\t\t\tJAMLabelselect\n\t\t\t\tBase\n\t\t\t\tCara\n\t\t\t\tBorde '''\n print(estructura)\n print(\"Ejemplo, Configuración actual:\\n\")\n print(\"\\t\", list(self.JAMObjects.keys()), \"\\n\")\n for k in list(self.JAMObjects.items()):\n print(k, \"\\n\")\n\n# ----- FIN DE CLASE JAMButton - INICIO DE DEBUG Y EJEMPLO DE LA CLASE -----\n\n\nclass Ejemplo(object):\n def __init__(self):\n self.ventana = None\n self.reloj = None\n self.nivel = \"menu_0\"\n\n self.fondo = None\n self.widgets = None\n\n self.resolucion = (800, 800)\n\n self.setup()\n self.Run()\n\n def Run(self):\n self.ventana.blit(self.fondo, (0, 0))\n self.widgets.draw(self.ventana)\n pygame.display.update()\n\n #self.widgets.sprites()[0].set_text(tipo=\"Arial\", tamanio=25, color=None, texto=\"Flavio Danesse\")\n #self.widgets.sprites()[0].set_imagen(origen=VG.URUGUAY, tamanio=None)\n #self.widgets.sprites()[0].set_colores(colorbas=None, colorbor=color, colorcara=None)\n #self.widgets.sprites()[0].set_tamanios(tamanio=tamanio, grosorbor=None, detalle=None, espesor=None)\n # self.widgets.sprites()[0].set_posicion(punto=(25,25))\n #self.widgets.sprites()[0].set_borde_label(grosor=2, color=VG.get_negro())\n self.widgets.sprites()[0].connect(\n callback=self.salir, sonido_select=VG.get_sound_select())\n contador = 0\n while self.nivel == \"menu_0\":\n self.reloj.tick(35)\n\n cambios = []\n self.widgets.clear(self.ventana, self.fondo)\n\n if contador == 100:\n # Activa la siguiente línea para provocar cambios de texto en JAMButton\n contador = self.ejemplo_cambia_texto_en_button()\n # Activa la siguiente línea para provocar cambios de imagen en JAMButton\n contador = self.ejemplo_cambia_imagen_en_button()\n # Activa la siguiente línea para provocar cambios de contenedor en JAMButton\n contador = self.ejemplo_cambia_colores_en_button()\n # Activa la siguiente línea para provocar cambios de posicion en JAMButton\n contador = self.ejemplo_cambia_posicion_en_button()\n # Activa la siguiente línea para provocar cambios de tamaño en JAMButton\n contador = self.ejemplo_cambia_tamanios_en_button()\n pass # y amor\n\n self.widgets.update()\n self.handle_event()\n pygame.event.clear()\n cambios.extend(self.widgets.draw(self.ventana))\n pygame.display.update(cambios)\n contador += 1\n\n def ejemplo_cambia_texto_en_button(self):\n import random\n cambios = [\"tipo\", \"tamanio\", \"color\", \"texto\"]\n modificar = random.choice(cambios)\n if modificar == \"tipo\":\n tipos = [\"Arial\", \"Purisa\", \"Times New Roman\",\n \"Vardana\", \"Impact\", pygame.font.get_default_font()]\n tipo = random.choice(tipos)\n self.widgets.sprites()[0].set_text(tipo=random.choice(\n tipos), 
tamanio=None, color=None, texto=None)\n if modificar == \"tamanio\":\n tamanios = [10, 20, 30, 40, 45]\n tamanio = random.choice(tamanios)\n self.widgets.sprites()[0].set_text(\n tipo=None, tamanio=tamanio, color=None, texto=None)\n if modificar == \"color\":\n colores = [(0, 0, 0, 255), (100, 100, 255, 255),\n (110, 25, 255, 255), (255, 125, 55, 255)]\n color = random.choice(colores)\n self.widgets.sprites()[0].set_text(\n tipo=None, tamanio=None, color=color, texto=None)\n if modificar == \"texto\":\n textos = [\"JAMLabel\", \"Presiona escape cuando quieras salir\",\n \"Modificando Texto en JAMLabel\", \"CeibalJAM 2011\"]\n texto = random.choice(textos)\n self.widgets.sprites()[0].set_text(\n tipo=None, tamanio=None, color=None, texto=texto)\n return 0\n\n def ejemplo_cambia_imagen_en_button(self):\n import random\n cambios = [\"origen\", \"tamanio\"]\n modificar = random.choice(cambios)\n if modificar == \"tamanio\":\n tamanios = [(10, 20), (30, 200), (250, 100), None]\n tamanio = random.choice(tamanios)\n self.widgets.sprites()[0].set_imagen(origen=None, tamanio=tamanio)\n if modificar == \"origen\":\n origenes = [VG.get_jamimagenes()[0], VG.get_jamimagenes()[1], -1]\n origen = random.choice(origenes)\n self.widgets.sprites()[0].set_imagen(origen=origen, tamanio=None)\n return 0\n\n def ejemplo_cambia_colores_en_button(self):\n import random\n cambios = [\"colorbas\", \"colorbor\", \"colorcara\"]\n modificar = random.choice(cambios)\n\n colores = [(10, 20, 100, 255), (100, 100, 100, 255),\n (255, 255, 255, 255), (255, 0, 0, 255)]\n color = random.choice(colores)\n\n if modificar == \"colorbas\":\n self.widgets.sprites()[0].set_colores(\n colorbas=color, colorbor=None, colorcara=None)\n if modificar == \"colorbor\":\n self.widgets.sprites()[0].set_colores(\n colorbas=None, colorbor=color, colorcara=None)\n if modificar == \"colorcara\":\n self.widgets.sprites()[0].set_colores(\n colorbas=None, colorbor=None, colorcara=color)\n return 0\n\n def ejemplo_cambia_tamanios_en_button(self):\n import random\n cambios = [\"tamanio\", \"grosorbor\", \"detalle\", \"espesor\"]\n modificar = random.choice(cambios)\n\n #set_tamanios(tamanio=None, grosorbor=None, detalle=None, espesor=None)\n if modificar == \"tamanio\":\n tamanios = [(200, 100), (100, 50), (20, 20),\n (300, 150), (10, 500), (300, 50), -1]\n tamanio = random.choice(tamanios)\n self.widgets.sprites()[0].set_tamanios(\n tamanio=tamanio, grosorbor=None, detalle=None, espesor=None)\n if modificar == \"grosorbor\":\n tamanios = [1, 5, 8, 10]\n tamanio = random.choice(tamanios)\n self.widgets.sprites()[0].set_tamanios(\n tamanio=None, grosorbor=tamanio, detalle=None, espesor=None)\n return 0\n\n def ejemplo_cambia_posicion_en_button(self):\n import random\n posiciones = [(0, 0), (25, 25), (25, 100), (25, 150)]\n posicion = random.choice(posiciones)\n self.widgets.sprites()[0].set_posicion(punto=posicion)\n return 0\n\n def setup(self):\n pygame.init()\n pygame.display.set_mode(self.resolucion, 0, 0)\n pygame.display.set_caption(\"Ejemplo\")\n\n self.fondo = self.get_Fondo()\n\n self.widgets = pygame.sprite.OrderedUpdates()\n self.widgets.add(JAMButton(\"JAMButton Prueba\", None, tipo=\"elipse\"))\n\n self.ventana = pygame.display.get_surface()\n self.reloj = pygame.time.Clock()\n\n pygame.event.set_blocked([JOYAXISMOTION, JOYBALLMOTION, JOYHATMOTION, JOYBUTTONUP, JOYBUTTONDOWN,\n KEYUP, USEREVENT, QUIT, ACTIVEEVENT])\n pygame.event.set_allowed(\n [MOUSEMOTION, MOUSEBUTTONUP, MOUSEBUTTONDOWN, KEYDOWN, VIDEORESIZE, VIDEOEXPOSE])\n 
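# Blocking the joystick/KEYUP/etc. events above keeps the pygame queue small,\n        # so the event.get(...) calls in JAMButton.update only ever see the mouse,\n        # KEYDOWN and video events this example actually handles.\n        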
pygame.mouse.set_visible(True)\n\n def get_Fondo(self):\n superficie = pygame.Surface(self.resolucion, flags=HWSURFACE)\n superficie.fill(VG.get_negro())\n return superficie\n\n def handle_event(self):\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n self.salir()\n pygame.event.clear()\n\n def salir(self, datos=None):\n print(\"\\n\")\n self.widgets.sprites()[0].Describe()\n pygame.quit()\n sys.exit()\n\n\nif __name__ == \"__main__\":\n Ejemplo()\n","sub_path":"BiblioJAM/JAMButton.py","file_name":"JAMButton.py","file_ext":"py","file_size_in_byte":28292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"50713137","text":"#!/usr/bin/env python\nfrom datetime import datetime, timedelta, time\nfrom unittest import TestCase\n\nimport pytz\nfrom bson import dumps, loads\n\n\nclass TestDateTime(TestCase):\n def test_datetime(self):\n now = datetime.now(pytz.utc)\n obj = {\"now\": now}\n serialized = dumps(obj)\n obj2 = loads(serialized)\n\n td = obj2[\"now\"] - now\n seconds_delta = (td.microseconds + (td.seconds + td.days * 24 * 3600) *\n 1e6) / 1e6\n self.assertTrue(abs(seconds_delta) < 0.001)\n\n\nclass TestTimeDelta(TestCase):\n def test_datetime(self):\n some_time = timedelta(microseconds=12, seconds=23, hours=2, days=5)\n obj = {\"some_time\": some_time}\n serialized = dumps(obj)\n obj2 = loads(serialized)\n\n interval = obj2[\"some_time\"]\n seconds_delta = interval.total_seconds() - some_time.total_seconds()\n self.assertTrue(abs(seconds_delta) < 0.001)\n\nclass TestTime(TestCase):\n def test_datetime(self):\n some_time = time(5, 33)\n obj = {\"utc_time\": some_time}\n serialized = dumps(obj)\n obj2 = loads(serialized)\n\n interval = obj2[\"utc_time\"]\n as_string = interval.isoformat()\n self.assertTrue(as_string == \"05:33:00\")","sub_path":"bson/tests/test_datetime.py","file_name":"test_datetime.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"224219082","text":"import math\r\n\r\nIn=open(\"train.001\",\"r\")\r\nUnigram={}\r\nBigram={}\r\nTotal=0\r\nfor Line in In:\r\n\tLine=Line.strip()\r\n\tfor i in range(len(Line)):\r\n\t\tif not Unigram.get(Line[i]):\r\n\t\t\tUnigram[Line[i]]=0\r\n\t\tUnigram[Line[i]]+=1\t\r\n\t\t\r\n\t\tif i == 0:\r\n\t\t\tcontinue\r\n\t\tif not Bigram.get(Line[i-1]+Line[i]):\r\n\t\t\tBigram[Line[i-1]+Line[i]]=0\r\n\t\tBigram[Line[i-1]+Line[i]]+=1\r\n\tTotal+=1\t\r\nIn.close()\r\n\r\nOut=open(\"prob.txt\",\"w\")\r\nProb=0.0\r\nfor k in Unigram.keys():\r\n\tProb=math.log(Unigram[k]/Total)\r\n\tprint(k,Prob,file=Out)\r\n\r\nfor k in Bigram.keys():\r\n\tif Unigram.get(k[0]):\r\n\t\tProb=math.log(Bigram[k]/Unigram[k[0]])\r\n\t\tprint(k,Prob,file=Out)\r\n\t\r\nOut.close()\r\n\t","sub_path":"type_writing/Bigram.py","file_name":"Bigram.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"139384054","text":"#!/usr/bin/env python3\n\nimport socket, sys\nimport libnotifymultiplex as libnotifymultiplex\n\nclass BadNotifyMethod(Exception):\n def __init__(self,string):\n self.string=string\n def __str__(self):\n return self.string\n\nclass __pynotify:\n def __init__(self):\n try:\n import pynotify\n except ImportError:\n raise BadNotifyMethod(\"pynotify not installed\")\n pynotify.init(\"notify-multiplexer\")\n \n def imageConvert(self, text):\n text=text.lower()\n if text=='im':\n return 
'notification-message-im'\n return text\n \n def send(self, data):\n import pynotify\n n = pynotify.Notification(data['title'],data['text'],self.imageConvert(data['image']))\n n.set_hint_string(\"x-canonical-append\",\"true\")\n n.show()\n \nclass __pynotify2:\n def __init__(self):\n try:\n import notify2\n except ImportError:\n raise BadNotifyMethod(\"notify2 not installed\")\n notify2.init(\"notify-multiplexer\")\n \n def imageConvert(self, text):\n text=text.lower()\n if text=='im':\n return 'notification-message-im'\n return text\n \n def send(self, data):\n import notify2\n n = notify2.Notification(data['title'],data['text'],self.imageConvert(data['image']))\n n.set_hint_string(\"x-canonical-append\",\"true\")\n n.show()\n\nclass __growl12:\n def __init__(self):\n import subprocess\n try:\n subprocess.call(['growlnotify','--version'])\n except:\n raise BadNotifyMethod(\"growl not installed\")\n \n def imageConvert(self, text):\n text=text.lower()\n if text=='im':\n return 'notification-message-im'\n return text\n \n def send(self, data):\n import subprocess\n subprocess.call(['growlnotify','-t',data['title'],'-m',data['text']])\n\nconf = \"/etc/notify-multiplexer/notify-multiplexer.conf\"\n\nif (len(sys.argv)>1):\n conf = sys.argv[1]\n\nnclass=None\ntry:\n if nclass is None:\n nclass = __pynotify()\nexcept BadNotifyMethod as e:\n print(e)\n\ntry:\n if nclass is None:\n nclass = __pynotify2()\nexcept BadNotifyMethod as e:\n print(e)\n\ntry:\n if nclass is None:\n nclass = __growl12()\nexcept BadNotifyMethod as e:\n print(e)\n \nif nclass is None:\n print(\"No suitable notification methods found\")\n exit(1)\n \nsock = libnotifymultiplex.NotifyMultiplexReciever(conf)\nwhile True:\n data = sock.recv()\n if data!=None:\n nclass.send(data)","sub_path":"sink/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"227711603","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n# @Time : 2019/10/6 16:55\n# @Author : HaoWang\n# @Site : HongKong, China\n# @project : $[PROJECT_NAME]\n# @File : 0704.What is Otus.py\n# @Software: PyCharm\n# @license: haowanghk@gmail.com \n\"\"\"\nimport cv2\nimport numpy as np\n\n# read images\nimg = cv2.imread('../pictures/noisy_color.jpg', 0)\n\nblur = cv2.GaussianBlur(img, (5, 5), 0)\n\n # find normalized_histogram, and its cumulative distribution function\n\nhist = cv2.calcHist([blur], [0], None, [256], [0, 256])\n\nhist_norm = hist.ravel() / hist.max()\n\nQ = hist_norm.cumsum()\n\n\nbins = np.arange(256)\n\n\nfn_min = np.inf\n\nthresh = -1\n\n\nfor i in range(1, 256):\n\n p1, p2 = np.hsplit(hist_norm, [i]) # probabilities\n\n q1, q2 = Q[i], Q[255] - Q[i] # cum sum of classes\n\n b1, b2 = np.hsplit(bins, [i]) # weights\n\n # finding means and variances\n\n m1, m2 = np.sum(p1 * b1) / q1, np.sum(p2 * b2) / q2\n\n v1, v2 = np.sum(((b1 - m1) ** 2) * p1) / q1, np.sum(((b2 - m2) ** 2) * p2) / q2\n\n # calculates the minimization function\n\n fn = v1 * q1 + v2 * q2\n\n if fn < fn_min:\n\n fn_min = fn\n\n thresh = i\n\n# find otsu's threshold value with OpenCV function\n\nret, otsu = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n\nprint(thresh, ret)","sub_path":"tutorial7-imageThresholding/0704.What is Otus.py","file_name":"0704.What is Otus.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"151154319","text":"import 
requests, json\n\nclass Wu:\n def __init__(self):\n self.project = \"\"\n self.run = \"\"\n self.clone = \"\"\n self.gen = \"\"\n self.unit = \"\"\n self.code = \"\"\n self.credit = \"\"\n self.credit_time = \"\"\n self.days = \"\"\n self.cpuid = \"\"\n\n def populate_apiInfo(self, user):\n try:\n uri = f\"https://api.foldingathome.org/project/{self.project}/run/{self.run}/clone/{self.clone}/gen/{self.gen}\"\n response = requests.get(uri)\n apiWus = json.loads(response.content)\n \n for apiWu in apiWus:\n if apiWu.get('user') == user:\n self.code = apiWu.get('code')\n self.credit = apiWu.get('credit')\n self.credit_time = apiWu.get('credit_time')\n self.days = apiWu.get('days')\n self.cpuid = apiWu.get('cpuid')\n \n except Exception as e:\n print(\"ERROR: cannot get info from API\")\n print(e)\n\n def __str__(self):\n return f\"{self.project},{self.run},{self.clone},{self.gen},{self.unit},{self.code},{self.credit},{self.credit_time},{self.days},{self.cpuid}\"","sub_path":"wu.py","file_name":"wu.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"268346404","text":"#!/usr/bin/env python\n\"\"\"\nadmin.py - Phenny Admin Module\nCopyright 2008-9, Sean B. Palmer, inamidst.com\nLicensed under the Eiffel Forum License 2.\n\nhttp://inamidst.com/phenny/\n\"\"\"\n\nimport re\n\ndef join(phenny, input): \n \"\"\"Join the specified channel. This is an admin-only command.\"\"\"\n # Can only be done in privmsg by an admin\n if input.sender.startswith('#'): \n return\n if input.admin: \n channel, key = input.group(1), input.group(2)\n if not key: \n phenny.write(['JOIN'], channel)\n else: \n phenny.write(['JOIN', channel, key])\njoin.rule = r'\\.join (#\\S+)(?: *(\\S+))?'\njoin.priority = 'low'\njoin.example = '.join #example or .join #example key'\n\ndef autojoin(phenny, input): \n \"\"\"Join the specified channel when invited by an admin.\"\"\"\n if input.admin: \n channel = input.group(1)\n phenny.write(['JOIN'], channel)\nautojoin.event = 'INVITE'\nautojoin.rule = r'(.*)'\n\ndef part(phenny, input): \n \"\"\"Part the specified channel. This is an admin-only command.\"\"\"\n # Can only be done in privmsg by an admin\n if input.sender.startswith('#'): \n return\n if input.admin: \n # TODO: add optional arguments for a part message\n phenny.write(['PART'], input.group(2).strip())\npart.commands = ['part']\npart.priority = 'high'\npart.example = '.part #example'\n\ndef quit(phenny, input): \n \"\"\"Quit from the server. This is an owner-only command. 
Quit message is optional.\"\"\"\n    # Can only be done in privmsg by the owner\n    if input.sender.startswith('#'): \n        return\n    if input.owner: \n        # TODO: add optional arguments for a quit message\n        a = input.group(2)\n        if a: \n            phenny.write(['QUIT', a])\n            __import__('os')._exit(0)\n        else:\n            phenny.write(['QUIT'])\n            __import__('os')._exit(0)\nquit.commands = ['quit']\nquit.priority = 'high'\nquit.example = '.quit message'\n\ndef msg(phenny, input): \n    # Can only be done in privmsg by an admin\n    if input.sender.startswith('#'): \n        return\n    a, b = input.group(2), input.group(3)\n    if (not a) or (not b): \n        return\n    if input.admin: \n        phenny.msg(a, b)\nmsg.rule = (['msg'], r'(#?\\S+) (.+)')\nmsg.priority = 'low'\n\ndef me(phenny, input): \n    # Can only be done in privmsg by an admin\n    if input.sender.startswith('#'): \n        return\n    if input.admin: \n        msg = '\\x01ACTION {0}\\x01'.format(input.group(3))\n        phenny.msg(input.group(2), msg)\nme.rule = (['me'], r'(#?\\S+) (.*)')\nme.priority = 'low'\n\ndef config_get(phenny, input):\n    \"\"\"Get a config option for phenny, refusing to reveal secrets.\"\"\"\n    if not input.admin:\n        phenny.say(\"Silly, you're not allowed to use this command!\")\n        return\n\n    config_to_get = input.group(2).split(' ')[0]\n    # never reveal credentials or API keys\n    secret_options = ('password', 'f_list_password', 'youtube_api_key',\n                      'wunderground_api_key', 'wordnik_api_key', 'serverpass',\n                      'derpibooru_key')\n    if config_to_get.lower() in secret_options:\n        phenny.say(\"Nuh uh! \" + phenny.config.owner + \" says that's a super-duper secret, and I promised to keep it!\")\n        return\n    config_option = \"\"\n    try:\n        config_option = str(getattr(phenny.config, config_to_get))\n        phenny.say(\"Looks like \" + config_to_get + \" is set to \" + config_option)\n    except AttributeError:\n        phenny.say(\"Oops, looks like I don't have an option called \" + config_to_get)\nconfig_get.rule = (['config_get','c_get'], r'(.*)')\nconfig_get.priority = 'low'\n\n# options to never change\ndonotchange = ['nick','host','port','ssl','ipv6','owner','password']\n\ndef config_set(phenny, input):\n    \"\"\"Set a config option for phenny while the bot is running, ignoring options that can't or shouldn't be changed.\"\"\"\n    if not input.admin:\n        phenny.say(\"Silly, you're not allowed to use this command!\")\n        return\n    args = input.group(2).split(' ')\n    config_to_set = args[0].lower()\n    options = args[1:]\n    if config_to_set.lower() in donotchange:\n        phenny.say(\"Hey! \" + phenny.config.owner + \" says I'm not allowed to change that!\")\n        return\n    if not hasattr(phenny.config, config_to_set):\n        phenny.say(\"Oops, looks like I don't have an option called \" + config_to_set)\n        return\n    existing_config = getattr(phenny.config, config_to_set)\n    try:\n        setattr(phenny.config, config_to_set, options)\n        phenny.say(\"Woo! \" + config_to_set + \" has been updated to \" + str(getattr(phenny.config, config_to_set)))\n    except Exception:\n        setattr(phenny.config, config_to_set, existing_config)\n        phenny.say(\"Oh no! \" + config_to_set + \" hasn't been updated! Sticking with the original value of \"\n                   + existing_config + \" instead.\")\nconfig_set.rule = (['config_set','c_set'], r'(.*)')\nconfig_set.priority = 'high'\n\n'''\ndef silence(phenny, input):\n    def ishostmask(subject):\n        to_match = '(.*)!(.*)@(.*)'\n        return re.compile(to_match).match(subject)\n    # Can only be done in privmsg by an admin\n    if input.sender.startswith('#'): \n        phenny.say('Nuh uh! Not here you can\\'t!')\n        return\n    if input.admin:\n        silence = 'SILENCE'\n        if input.group(2) not in ('+','-'):\n            phenny.msg(input.sender, 'Come on, plus or minus! Add or remove! You know this!')\n        else:\n            if input.group(2) == '+':\n                #add to the silence list\n                silence = silence + ' +'\n            elif input.group(2) == '-':\n                #remove from the silence list\n                silence = silence + ' -'\n            if input.group(3):\n                #discard everything after the first space\n                mask_to_silence = input.group(3).partition(' ')[0]\n                phenny.msg(input.sender, \"I'll be ignoring this (\" + input.group(3).partition(' ')[2] + \n                    \") extra bit. Follow the rules and Auntie Pinkie will take care of everything else!\")\n                if ishostmask(mask_to_silence):\n                    silence = silence + mask_to_silence + ' a'\n                    phenny.msg(input.sender, '\"' + silence + '\" is being sent to the server')\n                else:\n                    phenny.msg(input.sender, 'That\\'s not a hostmask, silly!')\n    phenny.msg(input.sender, input.group(2) + ' group 2 input')\n    phenny.msg(input.sender, input.group(3) + ' group 3 input')\nsilence.rule = (['silence'], r'(#?\\S+) (.+)')\nsilence.priority = 'high'\nsilence.example = '.silence + foo!bar@buzz.com to add a silence or .silence - fizz!bang@widgets.net to remove a silence'\n'''\n\nif __name__ == '__main__': \n    print(__doc__.strip())\n","sub_path":"modules/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":7574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"601338198","text":"import tinychain as tc\nimport unittest\n\nfrom testutils import PORT, start_host\n\n\nclass Database(tc.Cluster):\n    __uri__ = tc.URI(f\"http://127.0.0.1:{PORT}/app/db\")\n\n    def _configure(self):\n        schema = tc.table.Schema(\n            [tc.Column(\"name\", tc.String, 100)],\n            [tc.Column(\"year\", tc.UInt), tc.Column(\"description\", tc.String, 1000)])\n\n        self.movies = tc.chain.Block(tc.table.Table(schema))\n\n    @tc.get_method\n    def has_movie(self, name: tc.String):\n        return self.movies.contains([name])\n\n\nclass Web(tc.Cluster):\n    __uri__ = tc.URI(f\"http://127.0.0.1:{PORT}/app/web\")\n\n    def _configure(self):\n        schema = tc.btree.Schema((tc.Column(\"name\", tc.String, 100), tc.Column(\"views\", tc.UInt)))\n        self.cache = tc.chain.Sync(tc.btree.BTree(schema))\n\n    @tc.get_method\n    def views(self, name: tc.String) -> tc.UInt:\n        return self.cache[name].first()[\"views\"]\n\n    @tc.post_method\n    def add_movie(self, name: tc.String, year: tc.U32, description: tc.String):\n        db = tc.use(Database)\n\n        return (\n            db.movies.insert([name], [year, description]),\n            
self.cache.insert([name, 0]))\n\n @tc.put_method\n def add_view(self, txn, key: tc.String):\n txn.views = self.views(key)\n return tc.After(\n self.cache.delete(key),\n self.cache.insert([key, txn.views + 1]))\n\n\nclass DemoTests(unittest.TestCase):\n def setUp(self):\n self.hosts = []\n for i in range(3):\n port = PORT + i\n host_uri = tc.URI(f\"http://127.0.0.1:{port}\") + tc.uri(Web).path()\n host = start_host(\"table_demo\", [Database, Web], True, host_uri, wait_time=2)\n self.hosts.append(host)\n\n def testCache(self):\n self.hosts[1].post(\"/app/web/add_movie\", {\"name\": \"Up\", \"year\": 2009, \"description\": \"Pixar, balloons\"})\n\n for host in self.hosts:\n self.assertTrue(host.get(\"/app/db/has_movie\", \"Up\"))\n\n self.assertEqual(self.hosts[0].get(\"/app/web/views\", \"Up\"), 0)\n\n self.hosts[0].put(\"/app/web/add_view\", \"Up\")\n self.assertEqual(self.hosts[1].get(\"/app/web/views\", \"Up\"), 1)\n\n def tearDown(self):\n for host in self.hosts:\n host.stop()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_table_demo.py","file_name":"test_table_demo.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"47953660","text":"from time import time\nfrom sklearn import metrics\nfrom sklearn.cluster import KMeans, AffinityPropagation, SpectralClustering, AgglomerativeClustering, DBSCAN, MeanShift, \\\n estimate_bandwidth\nfrom sklearn.decomposition import PCA\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport numpy as np\n\ncategories = [\n 'alt.atheism',\n 'talk.religion.misc',\n 'comp.graphics',\n 'sci.space',\n]\ndataset = fetch_20newsgroups(subset='all', categories=categories,\n shuffle=True, random_state=42)\nvectorizer = TfidfVectorizer(max_df=0.5, max_features=2000, min_df=2, stop_words='english', use_idf=True)\nX = vectorizer.fit_transform(dataset.data)\n\ndata = X.toarray()\n\nn_samples, n_features = data.shape\n\nn_digits = 20\n\n\nlabels = dataset.target\n\n\nsample_size = 300\n\nprint(\"n_digits: %d, \\t n_samples %d, \\t n_features %d\"\n % (n_digits, n_samples, n_features))\n\nprint(82 * '_')\nprint('init\\t\\ttime\\thomo\\tcompl\\tv-meas\\tARI\\t\\tAMI')\n\ndef AllAlgorithm(estimator, name, data):\n t0 = time()\n predict = estimator.fit_predict(data)\n # print(predict)\n print('%-9s\\t%.2fs\\t%.3f\\t%.3f\\t%.3f\\t%.3f\\t%.3f'\n % (name, (time() - t0),\n metrics.homogeneity_score(labels, predict),\n metrics.completeness_score(labels, predict),\n metrics.v_measure_score(labels, predict),\n metrics.adjusted_rand_score(labels, predict),\n metrics.adjusted_mutual_info_score(labels, predict,\n average_method='arithmetic'),\n # metrics.silhouette_score(data, predict,\n # metric='euclidean',\n # sample_size=n_samples)\n )\n )\n del estimator\nAllAlgorithm(DBSCAN(eps=20, min_samples=2),\n name=\"DBSCAN\", data=data)\nbandwidth = estimate_bandwidth(data, quantile=0.3, n_samples=sample_size)\nprint(39 * '_' + 'PCA' + 40 * '_')\n\nreduced_data = PCA(n_components=10).fit_transform(data)\nbandwidth = estimate_bandwidth(data, quantile=0.3, n_samples=sample_size)\nAllAlgorithm(DBSCAN(eps=10, min_samples=2),\n name=\"DBSCAN\", data=reduced_data)\nprint(82 * 
'_')\n\n\n","sub_path":"first_practise/DBSCAN_doc.py","file_name":"DBSCAN_doc.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"360983262","text":"import sys, string, glob, os.path\nfrom gi.repository import Hinawa\n\nclass CuiKit():\n @staticmethod\n def _seek_snd_unit_from_guid(guid):\n for fullpath in glob.glob('/dev/snd/hw*'):\n try:\n unit = Hinawa.SndUnit()\n unit.open(fullpath)\n if unit.get_property('guid') == guid:\n return fullpath\n except:\n pass\n finally:\n del unit\n return None\n\n @staticmethod\n def _check_hexadecimal(literal):\n if literal.find('0x') == 0:\n literal = literal[2:]\n if len(literal) != 16:\n return False\n for character in literal:\n if character not in string.hexdigits:\n return False\n else:\n return True\n\n @staticmethod\n def _dump_help(cmdline):\n print('{0} CARD|GUID [FILE|CMD [ARGS]]'.format(cmdline))\n print(' CARD: the number as ALSA sound card, see /proc/asound/cards.')\n print(' GUID: global unique ID for your unit.')\n print(' FILE: path for a file with command list')\n print(' CMD: issue which you need')\n print(' ARGS: arguments for the command')\n\n @staticmethod\n def _dump_commands(cmds):\n print('Available commands:')\n for name in cmds.keys():\n print(' {0}'.format(name))\n\n @classmethod\n def seek_snd_unit_path(cls):\n args = sys.argv\n if len(args) > 1:\n identity = args[1]\n # Assume as sound card number if it's digit literal.\n if identity.isdigit():\n return '/dev/snd/hwC{0}D0'.format(identity)\n # Assume as GUID on IEEE 1394 bus if it's hexadecimal literal.\n elif cls._check_hexadecimal(identity):\n return cls._seek_snd_unit_from_guid(int(identity, base=16))\n cls._dump_help(args[0])\n return None\n\n @classmethod\n def dispatch_command(cls, unit, cmds):\n args = sys.argv\n if len(args) > 2:\n if args[2] in cmds:\n cmd = args[2]\n return cmds[cmd](unit, args[3:])\n if os.path.isfile(args[2]):\n filename = args[2]\n f = open(filename)\n for line in f.readlines():\n args = line.rstrip().split(' ')\n if len(args) > 0:\n cmd = args[0]\n if cmd not in cmds:\n print('Invalid command in {0}: {1}'.format(\n filename, cmd))\n return False\n if not cmds[cmd](unit, args[1:]):\n print('Invalid arguments in {0}: {1}'.format(\n filename, cmd))\n return False\n else:\n f.close()\n return True\n cls._dump_commands(cmds)\n return False\n","sub_path":"misc/cui_kit.py","file_name":"cui_kit.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"494213916","text":"from crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\nfrom django.forms import ModelForm\nfrom django.forms.widgets import CheckboxSelectMultiple\n\nfrom fantasyfirst.settings import ACTIVE_YEAR\nfrom ff.models import League, DraftableEvent, Roster, Draft, DraftSlot, PickableTeam\n\n\nclass LeagueForm(ModelForm):\n def __init__(self, *args, **kwargs):\n super(LeagueForm, self).__init__(*args, **kwargs)\n self.fields['events'].widget = CheckboxSelectMultiple(attrs={\"checked\": \"\"})\n self.fields['events'].queryset = DraftableEvent.objects.filter(frc_event__year=ACTIVE_YEAR).order_by(\n 'frc_event__start_date').select_related('frc_event')\n\n class Meta:\n model = League\n fields = ['name', 'events', 'rosters']\n\n\nclass RosterForm(ModelForm):\n def __init__(self, *args, **kwargs):\n super(RosterForm, self).__init__(*args, **kwargs)\n\n class Meta:\n model = Roster\n 
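# only the roster's display name is exposed for editing\n        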
fields = ['name']\n\n\nclass DraftForm(ModelForm):\n def __init__(self, *args, **kwargs):\n super(DraftForm, self).__init__(*args, **kwargs)\n\n class Meta:\n model = Draft\n fields = '__all__'\n\n\nclass DraftSlotForm(ModelForm):\n def __init__(self, *args, **kwargs):\n super(DraftSlotForm, self).__init__(*args, **kwargs)\n self.fields['pick'].required = False\n self.fields['pick'].queryset = PickableTeam.objects.filter(events=self.instance.draft.event).order_by(\n 'frc_team_id')\n\n class Meta:\n model = DraftSlot\n fields = ['number', 'roster', 'expire_time', 'pick']\n\n\nclass DraftSlotFormSetHelper(FormHelper):\n def __init__(self, *args, **kwargs):\n super(DraftSlotFormSetHelper, self).__init__(*args, **kwargs)\n self.form_method = 'POST'\n self.template = 'bootstrap/table_inline_formset.html'\n self.add_input(Submit('submit', 'Submit', css_class='btn-primary'))\n","sub_path":"ff/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"344817659","text":"import pandas as pd\nfrom pandas import Series, DataFrame\nfrom matplotlib import pyplot as plt\nfrom pandas.tools.plotting import table\n\n\ndef plotter(title, data):\n\n\t"This function plots the x-data 'Elapsed Time' and y-data Reflection(Percent reflection) \\\n\tin a dataframe == data. Give the figure a title with the string 'title'"\n \n\t#extract the data from the dataframe 'data'\n\tx = data['Elapsed Time']\n\ty = data['Reflection(Percent reflection)']\n \n\t#set up the axis subplot in the figure\n\tfig = plt.figure()\n\tax = fig.add_subplot(111)\n\tax.plot(x, y, color ='b')\n \n\t#customizing the figure \n\tax.tick_params(axis='both', labelsize=16, length = 12, width = 1)\n\tax.tick_params(axis = 'both', which = 'minor', length = 6, width = 1)\n\tax.set_title(title, fontsize = 18, fontname = 'Arial')\n\tax.set_xlabel(\"Time (s)\", fontname = 'Arial', fontsize = 18)\n\tax.set_ylabel(\"%R at 525 nm\", fontsize = 18, fontname = 'Arial')\n\tplt.minorticks_on()\n\tplt.xticks(fontname = 'Arial', fontsize = 18)\n\tplt.yticks(fontname = 'Arial', fontsize = 18)\n\tplt.tight_layout()\n\t#show the plot\n\tplt.show()\n\ndef growth_extract(dataframe, start, length, delta, n):\n\t'''This function extracts the rise portion of the data(dataframe), starting from time == 'start', with a \\\n\tlength == 'length' and period == delta. n is a factor for naming the keys of the dictionary. Returns \\\n\ta dictionary of dataframes with just the rise portion of the data.'''\n\t\n\tNewDataFrameNames = [str(_) + '% RH' for _ in range(10, n*10, 10)]+['10% RH_repeat']\n\tDFD = {elem : pd.DataFrame for elem in NewDataFrameNames}\n\tfor key in DFD.keys():\n\t\tDFD[key] = dataframe[(dataframe['Elapsed Time']>=start) & (dataframe['Elapsed Time']<=(start+length))]\n\t\tstart += delta\n\treturn DFD\n \n\ndef normalize(datadict):\n '''Takes a dictionary of dataframes with x axis in seconds and normalizes them to all start \\ \n at time == 0. Returns a dictionary of normalized dataframes. 
Keys are the same as the original \\\n dictionary of dataframes.'''\n\t\n #set x-axis with normalized value\n key_list = list(datadict.keys())\n # a list of the first x value in the 'Elapsed Time' Column for each dataframe in \n # the dataframe dictionary, datadict\n normalized_starts = [datadict[key].iloc[0,1] for key in key_list]\n x_normvalues = zip(key_list, normalized_starts)\n x_list = [datadict[x]['Elapsed Time'] - y for x,y in x_normvalues]\n \n # set y data with a list of the Reflection values from datadict\n y_list = [y_data['Reflection(Percent reflection)'] for y_data in datadict.values()]\n data = zip(x_list, y_list)\n newDFL = [pd.concat([x,y], axis =1) for x,y in data]\n\n #zip up keys and the normalized data list\n key_DF = zip(datadict.keys(), newDFL)\n normalized_DFD = {x: y for x,y in key_DF}\n return normalized_DFD\n\ndef combined_plotter(datadict, title):\n\t\n #make a single axes in a figure\n fig = plt.figure()\n ax = fig.add_subplot(111)\n \n #iterate through the dataframe dictionary to plot and label the desired data\n for key in datadict.keys():\n\t ax.plot(datadict[key]['Elapsed Time'], datadict[key]['Reflection(Percent reflection)'], label= key)\n \n \n #customizing the figure \n ax.set_xlim(-10, 310)\n ax.tick_params(axis='both', labelsize=16, length = 12, width = 1)\n ax.tick_params(axis = 'both', which = 'minor', length = 6, width = 1)\n ax.set_title(title, fontsize = 18, fontname = 'Arial')\n ax.set_xlabel(\"Time (s)\", fontname = 'Arial', fontsize = 18)\n ax.set_ylabel(\"%R at 525 nm\",fontsize = 18, fontname = 'Arial')\n plt.minorticks_on()\n plt.xticks(fontname = 'Arial', fontsize = 18)\n plt.yticks(fontname = 'Arial', fontsize = 18)\n plt.legend(loc='best')\n #still need to change the font of the legend to Arial\n plt.tight_layout()\n\t#show the plot\n plt.show()\n \ndef halfsat_valueandtime(dataframedict):\n # set y data with a list of the Reflection values from dataframedict\n y_list = [df['Reflection(Percent reflection)'] for df in dataframedict.values()]\n\n # calculate the half saturation point for each dataframe\n half_sat = [(y[-300:].mean() + float(y.values[0]))/2 for y in y_list]\n \n \n\t# zip together keys and half sat list to put into a new dictionary\n blah = zip(dataframedict.keys(), half_sat)\n half_sat_dict = {x:y for x,y in blah}\n \n\n # creating a list of the seven values closest to each half saturation point\n list_of_closevalues = [dataframedict[key].iloc[(dataframedict[key]['Reflection(Percent reflection)']-half_sat_dict[key]).abs().argsort()[:7]] for key in dataframedict.keys()]\n \n \n half_sat_time = [list_of_closevalues[x]['Elapsed Time'].mean() for x in range(len(list_of_closevalues))]\n \n half_sat_time_std = [list_of_closevalues[x]['Elapsed Time'].std() for x in range(len(list_of_closevalues))]\n\n results = DataFrame({'Half Saturation Value (%R)': half_sat, 'Half Saturation Time (s)': half_sat_time, 'Half Saturation Time Std (s)': half_sat_time_std}, columns = ['Half Saturation Value (%R)', 'Half Saturation Time (s)', 'Half Saturation Time Std (s)'],index = list(dataframedict.keys()))\n return results\n\ndef maketable(df):\n\tfig, ax = plt.subplots(figsize=(12, 2)) # set size frame\n\tax.xaxis.set_visible(False) # hide the x axis\n\tax.yaxis.set_visible(False) # hide the y axis\n\tax.set_frame_on(False) # no visible frame, uncomment if size is ok\n\ttabla = table(ax, df, loc='upper right', colWidths=[0.17]*len(df.columns)) # where df is your data frame\n\ttabla.auto_set_font_size(False) # Activate set fontsize manually\n\ttabla.set_fontsize(12) # if ++fontsize 
is necessary ++colWidths\n\ttabla.scale(1.2, 1.2) # change size table\n\t#plt.savefig('table.png', transparent=True)\n\tplt.show()\n\tplt.tight_layout()\n\t#plt.savefig('table.png', transparent=True)\n\n\nLocation = 'Stepping from 10-20-30-40-50-60-10 RH 7 min on 15 min off.TimeSeries'\ndf = pd.read_csv(Location, sep = '\\t', header = 4)\ndf.info()\ndf.head()\n\nplotter('Full Data', df)\nDFD = growth_extract(df, 120, 420, 1920, 7)\nnormalizedDFD = normalize(DFD)\ncombined_plotter(normalizedDFD, 'Normalized Rise Data')\nresults = halfsat_valueandtime(normalizedDFD)\nprint(results)\n\nmaketable(results)\n","sub_path":"HumidDetectorWorkUp.py","file_name":"HumidDetectorWorkUp.py","file_ext":"py","file_size_in_byte":6168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"450983870","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom fractions import Fraction\n\n#a, b = np.genfromtxt('8bWerteLC1C2', unpack=True)\n#x with pi/n\n#y *2pi for angular frequency\nx = np.linspace(0, 15)\ntheta = x * np.pi /16\nlreziprok=(1/(1.75*10**-3))\nc1reziprok=(1/(22.0*10**-9))\nc2reziprok=(1/(9.39*10**-9))\nc1malc2=((22.0*10**-9)*(9.39*10**-9))\nTerm1=(lreziprok*(c1reziprok+c2reziprok))\nTerm2=lreziprok*np.sqrt(((c1reziprok+c2reziprok)**2)-(4*(np.sin(theta)**2))/(c1malc2))\ny = Term1 - Term2\nplt.plot(theta, np.sqrt(y)/1000, 'b-', label=r'Theory curve')\ny = Term1 + Term2\nplt.plot(theta, np.sqrt(y)/1000, 'b-')\na, b = np.genfromtxt('8bWerteLC1C2', unpack=True)\nx = a * np.pi /16 #change if necessary\ny = b * 2 * np.pi\nplt.plot(x, y/1000, 'rx', label=r'Measured data')\nplt.ylabel(r'$\\omega (kHz)$')\nplt.xlabel(r'$\\Theta (rad)$')\nplt.legend(loc=\"best\")\nplt.tight_layout()\nplt.grid()\nplt.savefig('9bLC1C2.pdf')\n","sub_path":"356/9bLC1C2.py","file_name":"9bLC1C2.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"449813640","text":"# -*- coding: utf-8 -*-\n# @Time : 2021-06-29 14:32\n# @Author : zxl\n# @FileName: 119_2.py\n\nclass Solution:\n def getRow(self, rowIndex: int) :\n\n\n arr = [1 for i in range(rowIndex+1)]\n\n for i in range(1,rowIndex+1):\n for j in range(i-1,0,-1):\n arr[j] = arr[j]+arr[j-1]\n return arr\nobj = Solution()\nrowIndex = 4\nans = obj.getRow(rowIndex)\nprint(ans)\n\n","sub_path":"119_2.py","file_name":"119_2.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"318586630","text":"__author__ = 'saeedamen' # Saeed Amen / saeed@thalesians.com\n\n#\n# Copyright 2015 Thalesians Ltd. - http://www.thalesians.com / @thalesians\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the\n# License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and limitations under the License.\n#\n\n\"\"\"\nTimeSeriesFilter\n\nFunctions for filtering time series by dates and columns.\n\n\"\"\"\n\nfrom pythalesians.util.loggermanager import LoggerManager\n\nfrom pandas.tseries.offsets import CustomBusinessDay\n\nimport numpy as np\nimport pandas\nimport pytz\n\nclass TimeSeriesFilter:\n\n _time_series_cache = {} # shared across all instances of object!\n\n def __init__(self):\n self.logger = LoggerManager().getLogger(__name__)\n return\n\n def filter_time_series(self, time_series_request, data_frame):\n start_date = time_series_request.start_date\n finish_date = time_series_request.finish_date\n\n data_frame = self.filter_time_series_by_date(start_date, finish_date, data_frame)\n\n # filter by ticker.field combinations requested\n columns = self.create_tickers_fields_list(time_series_request)\n data_frame = self.filter_time_series_by_columns(columns, data_frame)\n\n return data_frame\n\n def create_calendar_bus_days(self, start, end, cal = 'FX'):\n hols = self.get_holidays(start, end, cal)\n index = pandas.bdate_range(start=start, end=end, freq='D')\n\n return [x for x in index if x not in hols]\n\n def get_holidays(self, start, end, cal = 'FX'):\n # TODO use Pandas CustomBusinessDays to get more calendars\n holidays_list = []\n\n if cal == 'FX':\n # filter for Christmas & New Year's Day\n for i in range(1970, 2020):\n holidays_list.append(str(i) + \"-12-25\")\n holidays_list.append(str(i) + \"-01-01\")\n\n if cal == 'WEEKDAY':\n bday = CustomBusinessDay(weekmask='Sat Sun')\n\n holidays_list = pandas.date_range(start, end, freq=bday)\n\n holidays_list = pandas.to_datetime(holidays_list).order()\n\n # floor start date\n start = np.datetime64(start) - np.timedelta64(1, 'D')\n\n # ceiling end date\n end = np.datetime64(end) + np.timedelta64(1, 'D')\n\n holidays_list = [x for x in holidays_list if x >= start and x <= end]\n\n return pandas.to_datetime(holidays_list)\n\n def filter_time_series_by_holidays(self, data_frame, cal = 'FX'):\n\n # optimal case for weekdays: remove Saturday and Sunday\n if (cal == 'WEEKDAY'):\n return data_frame.ix[data_frame.index.dayofweek <= 4]\n\n # select only those holidays in the sample\n holidays_start = self.get_holidays(data_frame.index[0], data_frame.index[-1], cal)\n\n if(holidays_start.size == 0):\n return data_frame\n\n holidays_end = holidays_start + np.timedelta64(1,'D')\n\n # floored_dates = data_frame.index.normalize()\n #\n # filter_by_index_start = floored_dates.searchsorted(holidays_start)\n # filter_by_index_end = floored_dates.searchsorted(holidays_end)\n #\n # indices_to_keep = []\n #\n # if filter_by_index_end[0] == 0:\n # counter = filter_by_index_end[0] + 1\n # start_index = 1\n # else:\n # counter = 0\n # start_index = 0\n #\n # for i in range(start_index, len(holidays_start)):\n # indices = list(range(counter, filter_by_index_start[i] - 1))\n # indices_to_keep = indices_to_keep + indices\n #\n # counter = filter_by_index_end[i] + 1\n #\n # indices = list(range(counter, len(floored_dates)))\n # indices_to_keep = indices_to_keep + indices\n #\n # data_frame_filtered = data_frame.ix[indices_to_keep]\n\n data_frame_left = data_frame\n 
data_frame_filtered = []\n\n for i in range(0, len(holidays_start)):\n data_frame_temp = data_frame_left.ix[data_frame_left.index < holidays_start[i]]\n data_frame_left = data_frame_left.ix[data_frame_left.index >= holidays_end[i]]\n\n data_frame_filtered.append(data_frame_temp)\n\n data_frame_filtered.append(data_frame_left)\n\n return pandas.concat(data_frame_filtered)\n\n def filter_time_series_by_date(self, start_date, finish_date, data_frame):\n offset = 0 # inclusive\n\n return self.filter_time_series_by_date_offset(start_date, finish_date, data_frame, offset)\n\n def filter_time_series_by_date_exc(self, start_date, finish_date, data_frame):\n offset = 1 # exclusive of start finish date\n\n return self.filter_time_series_by_date_offset(start_date, finish_date, data_frame, offset)\n\n # try:\n # # filter by dates for intraday data\n # if(start_date is not None):\n # data_frame = data_frame.loc[start_date <= data_frame.index]\n #\n # if(finish_date is not None):\n # # filter by start_date and finish_date\n # data_frame = data_frame.loc[data_frame.index <= finish_date]\n # except:\n # # filter by dates for daily data\n # if(start_date is not None):\n # data_frame = data_frame.loc[start_date.date() <= data_frame.index]\n #\n # if(finish_date is not None):\n # # filter by start_date and finish_date\n # data_frame = data_frame.loc[data_frame.index <= finish_date.date()]\n #\n # return data_frame\n\n def filter_time_series_by_date_offset(self, start_date, finish_date, data_frame, offset):\n try:\n data_frame = self.filter_time_series_aux(start_date, finish_date, data_frame, offset)\n except:\n # start_date = start_date.date()\n # finish_date = finish_date.date()\n # if isinstance(start_date, str):\n # # format expected 'Jun 1 2005 01:33', '%b %d %Y %H:%M'\n # try:\n # start_date = datetime.datetime.strptime(start_date, '%b %d %Y %H:%M')\n # except:\n # i = 0\n #\n # if isinstance(finish_date, str):\n # # format expected 'Jun 1 2005 01:33', '%b %d %Y %H:%M'\n # try:\n # finish_date = datetime.datetime.strptime(finish_date, '%b %d %Y %H:%M')\n # except:\n # i = 0\n\n # if we have dates stored as opposed to TimeStamps (ie. 
daily data), we use a simple (slower) method\n # for filtering daily data\n if(start_date is not None):\n data_frame = data_frame.loc[start_date.date() < data_frame.index]\n\n if(finish_date is not None):\n # filter by start_date and finish_date\n data_frame = data_frame.loc[data_frame.index < finish_date.date()]\n\n return data_frame\n\n def filter_time_series_aux(self, start_date, finish_date, data_frame, offset):\n start_index = 0\n finish_index = len(data_frame.index) - offset\n\n # filter by dates for intraday data\n if(start_date is not None):\n start_index = data_frame.index.searchsorted(start_date)\n\n if (0 <= start_index + offset < len(data_frame.index)):\n start_index = start_index + offset\n\n # data_frame = data_frame.ix[start_date < data_frame.index]\n\n if(finish_date is not None):\n finish_index = data_frame.index.searchsorted(finish_date)\n\n if (0 <= finish_index - offset < len(data_frame.index)):\n finish_index = finish_index - offset\n\n # data_frame = data_frame[data_frame.index < finish_date]\n\n return data_frame.ix[start_index:finish_index]\n\n def filter_time_series_by_time_of_day(self, hour, minute, data_frame, in_tz = None, out_tz = None):\n if out_tz is not None:\n if in_tz is not None:\n data_frame = data_frame.tz_localize(pytz.timezone(in_tz))\n\n data_frame = data_frame.tz_convert(pytz.timezone(out_tz))\n\n # change internal representation of time\n data_frame.index = pandas.DatetimeIndex(data_frame.index.values)\n\n data_frame = data_frame[data_frame.index.minute == minute]\n data_frame = data_frame[data_frame.index.hour == hour]\n\n return data_frame\n\n def filter_time_series_between_hours(self, start_hour, finish_hour, data_frame):\n data_frame = data_frame[data_frame.index.hour <= finish_hour]\n data_frame = data_frame[data_frame.index.hour >= start_hour]\n\n return data_frame\n\n def filter_time_series_by_columns(self, columns, data_frame):\n return data_frame[columns]\n\n def filter_time_series_by_excluded_keyword(self, keyword, data_frame):\n columns = [elem for elem in data_frame.columns if keyword not in elem]\n\n return self.filter_time_series_by_columns(columns, data_frame)\n\n def filter_time_series_by_included_keyword(self, keyword, data_frame):\n columns = [elem for elem in data_frame.columns if keyword in elem]\n\n return self.filter_time_series_by_columns(columns, data_frame)\n\n def filter_time_series_by_minute_freq(self, freq, data_frame):\n return data_frame.loc[data_frame.index.minute % freq == 0]\n\n def create_tickers_fields_list(self, time_series_request):\n tickers = time_series_request.tickers\n fields = time_series_request.fields\n\n if isinstance(tickers, str): tickers = [tickers]\n if isinstance(fields, str): fields = [fields]\n\n tickers_fields_list = []\n\n # create ticker.field combination for series we wish to return\n for f in fields:\n for t in tickers:\n tickers_fields_list.append(t + '.' 
+ f)\n\n return tickers_fields_list\n\n def resample_time_series(self, data_frame, freq):\n return data_frame.asfreq(freq, method = 'pad')\n\n def remove_out_FX_out_of_hours(self, data_frame):\n # assume data_frame is in GMT time\n # remove Fri after 22:00 GMT\n # remove Sat\n # remove Sun before 22:00 GMT\n\n # Monday = 0, ..., Sunday = 6\n data_frame = data_frame.ix[~((data_frame.index.dayofweek == 4) & (data_frame.index.hour > 22))]\n data_frame = data_frame.ix[~((data_frame.index.dayofweek == 5))]\n data_frame = data_frame.ix[~((data_frame.index.dayofweek == 6)& (data_frame.index.hour < 22))]\n\n return data_frame\n\n# functions to test class\nif __name__ == '__main__':\n\n logger = LoggerManager.getLogger(__name__)\n\n tsf = TimeSeriesFilter()\n\n if False:\n start = pandas.to_datetime('2000-01-01')\n end = pandas.to_datetime('2020-01-01')\n\n logger.info('Get FX holidays')\n hols = tsf.get_holidays(start, end, cal='FX')\n print(hols)\n\n logger.info('Get business days, excluding holidays')\n bus_days = tsf.create_calendar_bus_days(start, end, cal='FX')\n print(bus_days)\n\n if False:\n logger.info('Remove out of hours')\n\n rng = pandas.date_range('01 Jan 2014', '05 Jan 2014', freq='1min')\n intraday_vals = pandas.DataFrame(data=pandas.np.random.randn(len(rng)), index=rng)\n\n intraday_vals = tsf.resample_time_series(intraday_vals, '60min')\n intraday_vals = tsf.remove_out_FX_out_of_hours(intraday_vals)\n\n print(intraday_vals)\n\n if True:\n logger.info('Remove holiday days')\n\n rng = pandas.date_range('01 Jan 2007', '05 Jan 2014', freq='1min')\n intraday_vals = pandas.DataFrame(data=pandas.np.random.randn(len(rng)), index=rng)\n\n import cProfile\n\n cProfile.run(\"intraday_vals = tsf.filter_time_series_by_holidays(intraday_vals, 'FX')\")\n\n print(intraday_vals)\n\n","sub_path":"Python/Research/Prototypes/pythalesians adaptation/pythalesians/timeseries/calcs/timeseriesfilter.py","file_name":"timeseriesfilter.py","file_ext":"py","file_size_in_byte":12156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"138383506","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport sys\n\n\nclass Process_Image():\n def __init__(self, img, img2, area,mask):\n self.mask = mask\n self.img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n self.img = cv2.resize(self.img[area[0]:area[1],area[2]:area[3]], (1080,1920), interpolation=cv2.INTER_CUBIC)\n self.img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n self.img2 = cv2.resize(self.img2[area[0]:area[1],area[2]:area[3]], (1080,1920),interpolation=cv2.INTER_CUBIC)\n self.FDI(15)\n self.Close3()\n #self.FC()\n #self.Mask()\n #self.GB()\n self.HC()\n #self.out = self.img3\n\n def FDI(self, T):\n self.img3 = cv2.absdiff(self.img, self.img2)\n self.img3 = np.where(self.img3 > T, 255, 0).astype('uint8')\n\n def Close(self):\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))\n self.img3 = cv2.morphologyEx(self.img3, cv2.MORPH_OPEN, kernel, iterations=1)\n self.img3 = cv2.dilate(self.img3, kernel, iterations=1)\n del kernel\n\n def Close2(self):\n kernel3 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))\n self.img3 = cv2.erode(self.img3, kernel3, iterations=1)\n self.img3 = cv2.dilate(self.img3, kernel, iterations=1)\n #self.img3 = cv2.erode(self.img3, kernel,iterations=2)\n #self.img3 = cv2.dilate(self.img3, kernel,iterations=1)\n #self.img3 = cv2.morphologyEx(self.img3, cv2.MORPH_OPEN, 
kernel,iterations = 2)\n #self.img3 = cv2.dilate(self.img3, kernel,iterations=1)\n del kernel\n\n def Close3(self):\n kernel3 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))\n kernel5 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))\n kernel7 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7,7))\n self.img3 = cv2.dilate(self.img3, kernel5, iterations=1)\n self.img3 = cv2.erode(self.img3, kernel7, iterations=1)\n self.img3 = cv2.dilate(self.img3, kernel5, iterations=1)\n self.img3 = cv2.erode(self.img3, kernel3, iterations=1)\n del kernel3\n del kernel5\n del kernel7\n\n def Mask(self):\n mask1 = np.zeros(self.img.shape[0:2], dtype=\"uint8\")\n cv2.rectangle(mask1, self.mask[0:2], self.mask[2:4], 255, -1)\n self.img3 = cv2.add(self.img3, self.img3, mask=mask1)\n del mask1\n\n def GB(self):\n self.img3 = cv2.GaussianBlur(self.img3, (5, 5), 0)\n self.img3 = np.where(self.img3 > 1, 255, 0).astype('uint8')\n\n def HC(self):\n #self.point = cv2.HoughCircles(self.img3,cv2.HOUGH_GRADIENT,1, 150,param1=70,param2=10,minRadius=8,maxRadius=70)\n self.point = cv2.HoughCircles(self.img3, cv2.HOUGH_GRADIENT, 1, 150, param1=70, param2=10, minRadius=8, maxRadius=70) \n self.out = cv2.cvtColor(self.img3, cv2.COLOR_GRAY2BGR)\n if self.point is None:\n print('none')\n pass\n else:\n #print(self.point)\n for i in self.point[0]:\n leftup = [int(i[0]-i[2]),int(i[1]-i[2])]\n tmp = self.img3[leftup[1]:leftup[1]+int(2*i[2]),leftup[0]:leftup[0]+int(2*i[2])]\n #tmp = self.out[leftup[0]:leftup[0]+int(2*i[2]),leftup[1]:leftup[1]+int(2*i[2])]\n #print(leftup[0],leftup[0]+int(2*i[2]),leftup[1],leftup[1]+int(2*i[2]))\n #print(int(2*i[2])*int(2*i[2]))\n if cv2.countNonZero(tmp) >= int(2*i[2]*2*i[2]*0.55):\n #text = str(int(2*i[2]*2*i[2]*0.65)) + ' ' + str(cv2.countNonZero(tmp))\n #cv2.putText(self.out, text, (leftup[0],leftup[1]), cv2.FONT_HERSHEY_SIMPLEX,1, (0, 0, 255), 1, cv2.LINE_AA)\n cv2.circle(self.out, (i[0], i[1]), int(i[2]), (0, 0, 255), 3)\n\n def FC(self):\n ret, thresh = cv2.threshold(self.img3, 125, 255, 1)\n cnts, hierarchy = cv2.findContours(\n thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n self.out = cv2.cvtColor(self.img3, cv2.COLOR_GRAY2BGR)\n for c in cnts:\n if cv2.contourArea(c) < 80:\n continue\n if cv2.contourArea(c) > 3000:\n continue\n (x, y, w, h) = cv2.boundingRect(c)\n cv2.rectangle(self.out, (x, y), (x + w, y + h), (0, 255, 0), 2)\n cv2.drawContours(self.out, cnts, -1, (0, 255, 255), 2)\n\n\n\n'''\nlistA = [['606','607'],['878','879'],[\"894\",\"895\"],[\"3878\",\"3879\"],[\"4196\",\"4197\"]]\nfor i in listA:\n pa = \"/Users/teddy/Desktop/project/src/0311/\"+i[0]+\".jpg\"\n pb = \"/Users/teddy/Desktop/project/src/0311/\"+i[1]+\".jpg\"\n print(pa,pb)\n i1 = cv2.imread(pa)\n i2 = cv2.imread(pb)\n A = Process_Image(i1,i2,[0,1920,0,1080],(0,400,1080,1300))\n cv2.imwrite(i[0]+'+'+i[1]+'_2.jpg',A.out)\n\n\n\nlistA = [['0320_1_2124','0320_1_2125'],['0320_1_2125','0320_1_2126'],[\"0320_1_2147\",\"0320_1_2148\"],[\"0320_1_2148\",\"0320_1_2149\"]]\n#listA = [['0320_1_2124','0320_1_2125']]\nfor i in listA:\n pa = \"/Users/teddy/Desktop/project/src/0323/\"+i[0]+\".jpg\"\n pb = \"/Users/teddy/Desktop/project/src/0323/\"+i[1]+\".jpg\"\n print(pa,pb)\n i1 = cv2.imread(pa)\n i2 = cv2.imread(pb)\n t0 = time.time()\n A = Process_Image(i1,i2,[640,1920,100,820],(0,400,1080,1300))\n t1 = time.time()\n print(t1-t0)\n 
cv2.imwrite(i[0]+'+'+i[1]+'_2.jpg',A.out)'''","sub_path":"libs/pre_process.py","file_name":"pre_process.py","file_ext":"py","file_size_in_byte":5399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"525212807","text":"n = int(input('Enter the first term of the arithmetical progression: '))\nm = int(input('Enter the ratio of the arithmetical progression: '))\nc = 1\nprint(\"{}, \".format(n), end='')\nwhile c < 10:\n n += m\n c += 1\n print(\"{}.\\n\".format(n) if c == 10 else \"{}, \".format(n), end='')\nc = int(input('Continue?\\n[1]YES\\n[2]NO\\n'))\nwhile c != 2:\n if c == 1:\n q = int(input('Enter how much times: '))\n x = 0\n while x < q:\n n += m\n x += 1\n print(\"{}.\\n\".format(n) if x == q else \"{}, \".format(n), end='')\n c = int(input('Continue?\\n[1]YES\\n[2]NO\\n'))\n else:\n c = int(input('Enter a valuable number \\n'))\nprint('Bye!')\n\n\n","sub_path":"exercicosPython/exercises/ex001_114/ex062ex061better.py","file_name":"ex062ex061better.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"117790937","text":"from __future__ import annotations\n\nimport datashape as ds\nfrom datashape import DataShape as DataShape\nimport pandas as pd\nimport numpy as np\nimport pyarrow as pa\nfrom progressivis.core.utils import integer_types, gen_columns\n\nfrom typing import (\n Union,\n Tuple,\n Dict,\n Any,\n List,\n Optional,\n Sequence,\n Type,\n TYPE_CHECKING,\n)\n\n\nExtensionDtype = pd.api.extensions.ExtensionDtype\n\n\nif TYPE_CHECKING:\n from .table_base import BasePTable\n\n\ndef dshape_print(dshape: Union[ds.Mono, str]) -> str:\n return ds.pprint(dshape, 1000000)\n\n\ndef dshape_fields(dshape: DataShape) -> Tuple[Tuple[str, ds.Mono], ...]:\n return dshape[0].fields\n\n\ndef dshape_table_check(dshape: DataShape) -> bool:\n return len(dshape) == 1 and isinstance(dshape[0], ds.Record)\n\n\ndef dshape_create(x: Union[DataShape, str, ds.Mono, Sequence[Any]]) -> DataShape:\n \"Create a datashape, maybe check later to limit to known types.\"\n return ds.dshape(x)\n\n\ndef dshape_comp_to_shape(x: Any, var: Any) -> Any:\n if isinstance(x, ds.Var):\n return var\n else:\n return int(x)\n\n\ndef dshape_to_shape(dshape: ds.Mono, var: Optional[Any] = None) -> List[Any]:\n return [dshape_comp_to_shape(x, var) for x in dshape.shape]\n\n\nOBJECT = np.dtype(\"O\")\nVSTRING = OBJECT\n\n\ndef dshape_to_h5py(dshape: DataShape) -> str:\n dtype = dshape.measure.to_numpy_dtype()\n if dtype == OBJECT:\n return VSTRING.str\n return dtype.str\n\n\ndef dshape_from_dtype(dtype: Union[np.dtype[Any], Type[Any]]) -> str:\n if dtype is str:\n return \"string\"\n if dtype is object:\n return \"object\"\n if dtype is bool:\n return \"bool\"\n if dtype is int:\n return \"int64\"\n assert isinstance(dtype, np.dtype)\n return str(ds.CType.from_numpy_dtype(dtype))\n\n\ndef dshape_extract(\n data: Any, columns: Optional[List[str]] = None\n) -> Optional[DataShape]:\n if data is None:\n return None\n if hasattr(data, \"dshape\"):\n return data.dshape # type: ignore\n if isinstance(data, np.ndarray):\n dshape = dshape_from_dtype(data.dtype)\n if columns is None:\n columns = gen_columns(len(data))\n dshapes = [\"%s: %s\" % (column, dshape) for column in columns]\n return ds.dshape(\"{\" + \", \".join(dshapes) + \"}\")\n if isinstance(data, pd.DataFrame):\n return dshape_from_dataframe(data)\n if isinstance(data, pa.RecordBatch):\n return dshape_from_pa_batch(data)\n if 
isinstance(data, dict):\n return dshape_from_dict(data)\n return None\n\n\ndef dshape_projection(\n table: BasePTable,\n columns: Optional[List[str]] = None,\n names: Optional[List[str]] = None,\n) -> DataShape:\n if columns is None and names is None:\n return table.dshape\n dshapes: List[str] = []\n if names is None:\n names = columns\n assert columns is not None and names is not None\n assert len(columns) == len(names)\n for colname, newname in zip(columns, names):\n col = table._column(colname)\n if len(col.shape) > 1:\n dshapes.append(\"%s: %d * %s\" % (newname, col.shape[1], col.dshape))\n else:\n dshapes.append(\"%s: %s\" % (newname, col.dshape))\n return ds.dshape(\"{\" + \",\".join(dshapes) + \"}\")\n\n\ndef dshape_from_columns(table: BasePTable, columns: List[str], dshape: Any) -> DataShape:\n dshapes: List[str] = []\n for colname in columns:\n col = table._column(colname)\n if len(col.shape) > 1:\n dshapes.append(\"%s: %d * %s\" % (col.name, col.shape[1], dshape))\n else:\n dshapes.append(\"%s: %s\" % (col.name, dshape))\n return ds.dshape(\"{\" + \",\".join(dshapes) + \"}\")\n\n\ndef dataframe_dshape(dtype: Union[np.dtype[Any], ExtensionDtype]) -> str:\n if dtype == OBJECT:\n return \"string\"\n if dtype.name.startswith(\"datetime\"):\n return \"6*uint16\"\n return str(dtype)\n\n\ndef np_dshape(v: Any, skip: int = 1) -> str:\n dshape: str\n if isinstance(v, np.ndarray):\n if v.dtype == OBJECT:\n dshape = \"string\"\n else:\n dshape = v.dtype.name\n shape = v.shape\n for d in shape[skip:]:\n dshape = \"%d * %s\" % (d, dshape)\n elif isinstance(v, list):\n e = v[0]\n if isinstance(e, str):\n dshape = \"string\"\n elif isinstance(e, integer_types):\n dshape = \"int\"\n elif isinstance(e, float):\n dshape = \"float64\"\n elif isinstance(e, tuple):\n dshape = np_dshape(np.array(e), skip=0) # recursive call\n elif isinstance(e, np.ndarray):\n dshape = np_dshape(e, skip=0) # recursive call\n else:\n raise ValueError(\"unknown dshape for %s\" % v)\n return dshape\n\n\ndef dshape_from_dict(d: Dict[str, Any]) -> DataShape:\n shape = \",\".join([\"%s: %s\" % (c, np_dshape(d[c])) for c in d])\n return ds.dshape(\"{\" + shape + \"}\")\n\n\n# def dshape_from_pytable(pt) -> DataShape:\n# shape = \",\".join([\"{}: {}\".format(c, pt.coltypes[c]) for c in pt.colnames])\n# return ds.dshape(\"{\" + shape + \"}\")\n\n\ndef dshape_from_dataframe(df: pd.DataFrame) -> DataShape:\n columns = df.columns\n if columns.dtype == np.int64:\n shape = \",\".join(\n [\"_%s:%s\" % (df[c].name, dataframe_dshape(df[c].dtype)) for c in df]\n )\n else:\n shape = \",\".join(\n [\"%s:%s\" % (df[c].name, dataframe_dshape(df[c].dtype)) for c in df]\n )\n return ds.dshape(\"{\" + shape + \"}\")\n\n\ndef dshape_from_pa_batch(df: pa.RecordBatch) -> DataShape:\n col_types = zip(df.schema.names, df.schema.types)\n shape = \",\".join(\n [\n f\"{c}:{dataframe_dshape(np.dtype(t.to_pandas_dtype()))}\"\n for (c, t) in col_types\n ]\n )\n return ds.dshape(\"{\" + shape + \"}\")\n\n\ndef array_dshape(\n df: Union[np.ndarray[Any, Any], BasePTable, pd.DataFrame], array_col: str\n) -> DataShape:\n if isinstance(df, np.ndarray):\n shape = dataframe_dshape(df.dtype)\n length = df.shape[1]\n else:\n col_dshapes = set([dataframe_dshape(df[c].dtype) for c in df.columns])\n if len(col_dshapes) != 1:\n raise ValueError(\"All column must have the same data type\")\n shape = col_dshapes.pop()\n length = len(df.columns)\n if length == 1:\n return ds.dshape(f\"{{{array_col}: {shape}}}\")\n else:\n return ds.dshape(f\"{{{array_col}: {length} * 
{shape}}}\")\n\n\n# myds = dshape(\"{a: int, b: float32, c: string, d:string, e:string, f:int32, g:float32}\")\n# get_projection_dshape(myds, [2,4,6])\ndef get_projection_dshape(dshape_: DataShape, projection_ix: List[int]) -> DataShape:\n shape = \",\".join(\n [\n \"{arg[0]}:{arg[1]}\".format(arg=dshape_[0].fields[elt])\n for elt in projection_ix\n ]\n )\n return ds.dshape(\"{\" + shape + \"}\")\n\n\n# get_projection_dshape_with_keys(myds, ['c','e','g'])\ndef get_projection_dshape_with_keys(\n dshape_: DataShape, projection_keys: List[str]\n) -> DataShape:\n dict_ = {k: ix for ix, (k, _) in enumerate(dshape_[0].fields)}\n return get_projection_dshape(dshape_, [dict_[key] for key in projection_keys])\n\n\ndef dshape_compatible(ds1: Optional[DataShape], ds2: DataShape) -> bool:\n if ds1 is None:\n return False\n assert isinstance(ds1, DataShape) and isinstance(ds2, DataShape)\n return True\n\n\n#\n# left = ds.dshape(\"{a: int, b: float32, c: string, d:string, e:string, f:int32, g:float32}\")\n# right = ds.dshape(\"{x: int, y: float32, z: string}\")\n# dshape_join(left, right)\n# right2 = ds.dshape(\"{x: int, y: float32, c:int32, z: string}\")\n# dshape_join(left, right2, lsuffix='_l', rsuffix='_r')\n# left2 = ds.dshape(\"{a: int, b: float32, c: string, d:string, y:float32, e:string, f:int32, g:float32}\")\n# dshape_join(left2, right, lsuffix='_l')\n# dshape_join(left2, right2)\n\n\ndef dshape_join(\n left: DataShape, right: DataShape, lsuffix: str = \"\", rsuffix: str = \"\"\n) -> Tuple[DataShape, Dict[str, Dict[str, str]]]:\n res = []\n rename: Dict[str, Dict[str, str]] = {\"left\": {}, \"right\": {}}\n suffix = {\"left\": lsuffix, \"right\": rsuffix}\n left_cols = left[0].fields\n keys, _ = zip(*left_cols)\n left_keys = set(keys)\n right_cols = right[0].fields\n keys, _ = zip(*right_cols)\n right_keys = set(keys)\n inter_keys = left_keys.intersection(right_keys)\n if inter_keys and not lsuffix and not rsuffix:\n raise ValueError(\"columns overlap in join without left/right suffixes\")\n len_left = len(left_keys)\n all_cols = left_cols + right_cols\n for i, (cname, ctype) in enumerate(all_cols):\n side = \"left\" if i < len_left else \"right\"\n if cname in inter_keys and suffix[side]:\n alias = cname + suffix[side]\n rename[side][cname] = alias\n else:\n alias = cname\n res.append((alias, ctype))\n ret = \"{\" + \",\".join([\"{}: {}\".format(f, t) for f, t in res]) + \"}\"\n return ds.dshape(ret), rename\n\n\ndef dshape_union(left: DataShape, right: DataShape) -> DataShape:\n res = []\n left_dict = dict(left[0].fields)\n left_keys = set(left_dict.keys())\n right_dict = dict(right[0].fields)\n right_keys = set(right_dict.keys())\n union_keys = sorted(left_keys.union(right_keys))\n for key in union_keys:\n # ctype = left_dict.get(key, right_dict[key]) # nice bug!\n ctype = left_dict[key] if key in left_dict else right_dict[key]\n res.append((key, ctype))\n return ds.dshape(\"{\" + \",\".join([\"{}: {}\".format(f, t) for f, t in res]) + \"}\")\n\n\ndef dshape_all_dtype(columns: List[str], dtype: np.dtype[Any]) -> DataShape:\n dshape = dshape_from_dtype(dtype)\n dshapes = [\"%s: %s\" % (column, dshape) for column in columns]\n return ds.dshape(\"{\" + \", \".join(dshapes) + \"}\")\n\n\nEMPTY_DSHAPE = ds.dshape(\"{}\")\n","sub_path":"progressivis/table/dshape.py","file_name":"dshape.py","file_ext":"py","file_size_in_byte":9756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"267075476","text":"#!/usr/bin/env python\n\nclass 
BSTNode(object):\n \n def __init__(self, parent, k):\n self.key = k\n self.parent = parent\n self.left = None\n self.right = None\n \n def _str(self):\n label = str(self.key)\n if self.left is None:\n left_lines, left_pos, left_width = [], 0, 0\n else:\n left_lines, left_pos, left_width = self.left._str()\n if self.right is None:\n right_lines, right_pos, right_width = [], 0, 0\n else:\n right_lines, right_pos, right_width = self.right._str()\n middle = max(right_pos + left_width - left_pos + 1, len(label), 2)\n pos = left_pos + middle // 2\n width = left_pos + middle + right_width - right_pos\n while len(left_lines) < len(right_lines):\n left_lines.append(' ' * left_width)\n while len(right_lines) < len(left_lines):\n right_lines.append(' ' * right_width)\n if (middle - len(label)) % 2 == 1 and self.parent is not None and \\\n self is self.parent.left and len(label) < middle:\n label += '.'\n label = label.center(middle, '.')\n if label[0] == '.': label = ' ' + label[1:]\n if label[-1] == '.': label = label[:-1] + ' '\n lines = [' ' * left_pos + label + ' ' * (right_width - right_pos),\n ' ' * left_pos + '/' + ' ' * (middle-2) +\n '\\\\' + ' ' * (right_width - right_pos)] + \\\n [left_line + ' ' * (width - left_width - right_width) + right_line\n for left_line, right_line in zip(left_lines, right_lines)]\n return lines, pos, width\n def __str__(self):\n return '\\n'.join(self._str()[0])\n\n def find(self, k):\n if k == self.key:\n return self\n elif k < self.key:\n if self.left is None:\n return None\n else:\n return self.left.find(k)\n else:\n if self.right is None: \n return None\n else:\n return self.right.find(k)\n \n def find_min(self):\n current = self\n while current.left is not None:\n current = current.left\n return current\n \n def next_larger(self):\n if self.right is not None:\n return self.right.find_min()\n current = self\n while current.parent is not None and current is current.parent.right:\n current = current.parent\n return current.parent\n\n def insert(self, node):\n if node is None:\n return\n if node.key < self.key:\n if self.left is None:\n node.parent = self\n self.left = node\n else:\n self.left.insert(node)\n else:\n if self.right is None:\n node.parent = self\n self.right = node\n else:\n self.right.insert(node)\n \n def delete(self):\n if self.left is None or self.right is None:\n if self is self.parent.left:\n self.parent.left = self.left or self.right\n if self.parent.left is not None:\n self.parent.left.parent = self.parent\n else:\n self.parent.right = self.left or self.right\n if self.parent.right is not None:\n self.parent.right.parent = self.parent\n return self\n else:\n s = self.next_larger()\n self.key, s.key = s.key, self.key\n return s.delete()\n \n def check_ri(self):\n if self.left is not None:\n if self.left.key > self.key:\n raise RuntimeError(\"BST RI violated by a left node key\")\n if self.left.parent is not self:\n raise RuntimeError(\"BST RI violated by a left node parent \"\n \"pointer\")\n self.left.check_ri()\n if self.right is not None:\n if self.right.key < self.key:\n raise RuntimeError(\"BST RI violated by a right node key\")\n if self.right.parent is not self:\n raise RuntimeError(\"BST RI violated by a right node parent \"\n \"pointer\")\n self.right.check_ri()\n\nclass MinBSTNode(BSTNode):\n def __init__(self, parent, key):\n super(MinBSTNode, self).__init__(parent, key)\n self.min = self\n \n def find_min(self):\n return self.min\n\n def insert(self, node):\n if node is None:\n return\n if node.key < self.key:\n # Updates the min 
of this node if the inserted node has a smaller\n # key.\n if node.key < self.min.key:\n self.min = node\n if self.left is None:\n node.parent = self\n self.left = node\n else:\n self.left.insert(node)\n else:\n if self.right is None:\n node.parent = self\n self.right = node\n else:\n self.right.insert(node)\n \n def delete(self):\n if self.left is None or self.right is None:\n if self is self.parent.left:\n self.parent.left = self.left or self.right\n if self.parent.left is not None:\n self.parent.left.parent = self.parent\n self.parent.min = self.parent.left.min\n else: \n self.parent.min = self.parent\n # Propagates the changes upwards.\n c = self.parent\n while c.parent is not None and c is c.parent.left:\n c.parent.min = c.min\n c = c.parent\n else:\n self.parent.right = self.left or self.right\n if self.parent.right is not None:\n self.parent.right.parent = self.parent\n return self\n else:\n s = self.next_larger()\n self.key, s.key = s.key, self.key\n return s.delete()\n\nclass BST():\n\n def __init__(self, klass = BSTNode):\n self.root = None\n self.klass = klass\n \n def __str__(self):\n if self.root is None: return ''\n return str(self.root)\n\n def find(self, k):\n return self.root and self.root.find(k)\n \n def find_min(self):\n return self.root and self.root.find_min()\n \n def insert(self, k):\n node = self.klass(None, k)\n if self.root is None:\n # The root's parent is None.\n self.root = node\n else:\n self.root.insert(node)\n return node\n \n def delete(self, k):\n node = self.find(k)\n if node is None:\n return None\n if node is self.root:\n pseudoroot = self.klass(None, 0)\n pseudoroot.left = self.root\n self.root.parent = pseudoroot\n deleted = self.root.delete()\n self.root = pseudoroot.left\n if self.root is not None:\n self.root.parent = None\n return deleted\n else:\n return node.delete() \n \n def next_larger(self, k):\n node = self.find(k)\n return node and node.next_larger()\n \n def check_ri(self):\n if self.root is not None:\n if self.root.parent is not None:\n raise RuntimeError()\n self.root.check_ri()\n\n ","sub_path":"BST.py","file_name":"BST.py","file_ext":"py","file_size_in_byte":7474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"483276678","text":"import re\nfrom spideroj.crawler.spiders import Spider\nfrom spideroj.crawler.field import Field\nfrom spideroj.crawler.processor import Cleaner\n\n\nclass LeetcodecnSpider(Spider):\n server_name = 'leetcodecn'\n js_support = True\n\n fields = [\n Field(\n name='Solved Question',\n xpath_selector='//*[@id=\"lc-content\"]/div/div/div[2]/div[2]/div[2]/div[4]/div[2]/span/text()',\n cleaner=Cleaner.get_fraction\n ),\n\n Field(\n name='Finished Contests',\n xpath_selector='//*[@id=\"lc-content\"]/div/div/div[2]/div[2]/div[2]/div[3]/p/text()',\n cleaner=lambda x: int(re.search(r'\\d+', x)[0])\n ),\n\n Field(\n name='AC Ranking',\n xpath_selector='//*[@id=\"lc-content\"]/div/div/div[1]/div/div[1]/div/div[3]/span/text()'\n ),\n\n Field(\n name='Accepted Submission',\n xpath_selector='//*[@id=\"lc-content\"]/div/div/div[2]/div[2]/div[2]/div[4]/div[3]/span/text()',\n cleaner=Cleaner.get_fraction\n ),\n\n Field(\n name='Acceptance Rate',\n xpath_selector='//*[@id=\"lc-content\"]/div/div/div[2]/div[2]/div[2]/div[4]/div[4]/span/text()',\n cleaner=Cleaner.get_percent\n ),\n\n Field(\n name='National Ranking',\n xpath_selector='/html/body/div[1]/div/div[2]/div/div/div[2]/div[2]/div[2]/div[3]/div[2]/div[3]/text()'\n ),\n\n Field(\n name='Global Ranking',\n 
xpath_selector='/html/body/div[1]/div/div[2]/div/div/div[2]/div[2]/div[2]/div[3]/div[3]/div[3]/text()'\n )\n\n # Field(\n # name='Contest Rating',\n # xpath_selector='/html/body/div[5]/div/div/div/div[2]/text()',\n # cleaner=lambda x: int(re.search(r'\\d+', x)[0])\n # )\n ]\n","sub_path":"spideroj/crawler/spiders/leetcodecn.py","file_name":"leetcodecn.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"461034713","text":"from contextlib import contextmanager\nfrom typing import (\n Dict,\n List\n)\nimport os\n\nimport boto3\nimport pytest\n\nfrom moto import mock_s3\n\nBUCKET_NAME = \"mock\"\nFILENAME = \"tests/resources/mock_file.csv\"\nEMPTY_FILE = \"tests/resources/empty.data\"\n\n\n@pytest.fixture(scope=\"module\")\ndef aws_credentials():\n os.environ[\"AWS_DEFAULT_REGION\"] = \"us-east-1\"\n os.environ[\"AWS_ACCESS_KEY_ID\"] = \"test\"\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"test\"\n os.environ[\"AWS_SECURITY_TOKEN\"] = \"test\"\n os.environ[\"AWS_SESSION_TOKEN\"] = \"test\"\n\n\n@pytest.yield_fixture(scope=\"module\")\ndef s3_client(aws_credentials):\n with mock_s3():\n session = boto3.session.Session()\n s3 = session.client(\"s3\")\n yield s3\n\n\n@contextmanager\ndef create_bucket(s3_client, bucket, key=None, data=None, keys_paths=[]):\n s3_client.create_bucket(Bucket=bucket)\n\n if key and data:\n s3_client.put_object(Bucket=bucket, Key=key, Body=data)\n\n for key, fn in keys_paths:\n s3_client.upload_file(Bucket=bucket, Key=key, Filename=fn)\n\n yield\n\n response = s3_client.list_objects_v2(Bucket=bucket)\n if \"Contents\" in response:\n for obj in response[\"Contents\"]:\n s3_client.delete_object(Bucket=bucket, Key=obj[\"Key\"])\n\n s3_client.delete_bucket(Bucket=bucket)\n\n\ndef create_files(path: str, files: Dict) -> List[str]:\n \"\"\"Create folder structure.\n\n The function creates a folder structure with files under a given path\n based on the dictionary passed as parameter.\n\n Parameters\n ----------\n path: str\n Root folder where the files/folders will be created.\n\n files: dict\n Dictionary with parametrization of files and folder to be created.\n A key is a folder if the value is an object, else\n A key is a file and the value will be the file content.\n\n Returns\n -------\n list[str]\n Path to all files/folders created.\n\n Examples\n --------\n To create the structure below\n\n root\n ├── file.root\n ├── folderA\n │   └── file.A2\n ├── folderB\n └── folderC\n └── folderC1\n └── file.CC1\n\n >>> files = {\n \"file.root\": \"This is the content of the file.root\",\n \"folderA\": {\n \"file.A2\": \"This is the content of the file.A2\"\n },\n \"folderB\": {\n },\n \"folderC\": {\n \"folderC1\": {\n \"file.CC1\": \"This is the content of the file.CC1\"\n },\n }\n }\n\n >>> create_files('root_folder', files)\n [\n 'root_folder/file.root',\n 'root_folder/folderA/file.A2',\n 'root_folder/folderC/folderC1/file.CC1'\n ]\n\n \"\"\"\n filepaths: List[str] = []\n os.makedirs(path, exist_ok=True)\n for key in files:\n if type(files[key]) is dict:\n filepaths.extend(\n create_files(os.path.join(path, key), files[key])\n )\n else:\n with open(os.path.join(path, key), 'w') as f:\n f.write(files[key])\n filepaths.append(os.path.join(path, key))\n\n return filepaths\n","sub_path":"tests/unit/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
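A minimal usage sketch for the `create_bucket` context manager and `create_files` helper defined in the conftest.py record above; the test name, file tree, and object key below are hypothetical, and `s3_client` / `tmp_path` are assumed to come from that conftest and from pytest's builtin fixtures.

def test_upload_roundtrip(s3_client, tmp_path):
    # Build a small local tree, then seed the mocked S3 bucket with one of the files.
    files = {"data": {"mock_file.csv": "a,b\n1,2\n"}}
    local_paths = create_files(str(tmp_path), files)  # returns the paths of the files it wrote

    with create_bucket(s3_client, BUCKET_NAME,
                       keys_paths=[("data/mock_file.csv", local_paths[0])]):
        # The object exists only for the lifetime of the context manager;
        # create_bucket deletes the key and the bucket on exit.
        response = s3_client.list_objects_v2(Bucket=BUCKET_NAME)
        assert response["KeyCount"] == 1
        assert response["Contents"][0]["Key"] == "data/mock_file.csv"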
+{"seq_id":"369995535","text":"def main():\n numberOfRows, numberOfColumns = \\\n eval(input(\"Enter the number of rows and columns of the list: \"))\n \n a = []\n for i in range(numberOfRows):\n s = input(\"Enter a row : \") \n items = s.split() # Extracts items from the string\n list = [ eval(x) for x in items ] # Convert items to numbers \n a.append(list)\n \n location = locateLargest(a)\n print(\"The location of the largest element is at (\"\n + str(location[0]) + \", \" + str(location[1]) + \")\")\n \ndef locateLargest(a):\n location = 2 * [0]\n \n largest = a[0][0]\n for i in range(len(a)):\n for j in range(len(a[i])):\n if largest < a[i][j]:\n largest = a[i][j]\n location[0] = i\n location[1] = j\n\n return location\n\nmain()\n","sub_path":"python/python语法/pyexercise/Exercise11_13.py","file_name":"Exercise11_13.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"425213731","text":"import tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndata = keras.datasets.fashion_mnist\n\n(train_images, train_labels), (test_images, test_labels) = data.load_data()\n\nclass_names = ['T-shirt', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', \n'Shirt', 'Snearker', 'Bag', 'Ankle boot']\n\ntrain_images = train_images/255.0\ntest_images = test_images/255.0\n\n##print(train_images[7])\n##plt.imshow(train_ismages[7], cmap=plt.cm.binary)\n##plt.show()\n\nmodel = keras.Sequential([\n\tkeras.layers.Flatten(input_shape=(28,28)),\n\tkeras.layers.Dense(128,activation=\"relu\"), \n\tkeras.layers.Dense(10, activation=\"softmax\"), ## 10 neurons with activation function of soft max ~> pick values for each neurons that all \n\t## neurons add up to 1 => almost like a probability function \n\t])\n\nmodel.compile(optimizer=\"adam\", loss=\"sparse_categorical_crossentropy\", metrics=[\"accuracy\"])\n\n## training \nmodel.fit(train_images, train_labels, epochs=5) ## how many times the model will see the train images\n\n#test_loss, test_acc = model.evaluate(test_images, test_labels)\n\n#print(\"Tested Acc : \", test_acc)\n\nprediction = model.predict(test_images)\n\nfor i in range(5):\n\tplt.grid(False)\n\tplt.imshow(test_images[i], cmap=plt.cm.binary)\n\tplt.xlabel(\"Actual: \" + class_names[test_labels[i]])\n\tplt.title(\"Prediction \" + class_names[np.argmax(prediction[i])])\n\tplt.show()\n\n","sub_path":"fashionmnist.py","file_name":"fashionmnist.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"236564549","text":"from flask_assets import Bundle, Environment\n\nCSS = (\n \"vendor/fontawesome-free/css/all.min.css\",\n \"vendor/datatables/dataTables.bootstrap4.css\",\n \"css/sb-admin.css\"\n)\n\nJS = (\n \"vendor/jquery/jquery.js\",\n \"vendor/bootstrap/js/bootstrap.bundle.min.js\",\n \"vendor/jquery-easing/jquery.easing.min.js\",\n \"js/sb-admin.min.js\",\n)\n\n\ndef register_bundles(env: Environment):\n js_asset = Bundle(*JS, filters=\"jsmin\", output=\"packed.js\")\n css_asset = Bundle(*CSS, filters=\"cssmin\", output=\"packed.css\")\n\n env.register(\"js\", js_asset)\n env.register(\"css\", css_asset)","sub_path":"tavernk/app/assets.py","file_name":"assets.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"255149461","text":"from hy.core.language import first, 
last\nfrom keras.callbacks import LambdaCallback\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM\nfrom keras.optimizers import RMSprop\nfrom keras.utils.data_utils import get_file\nimport numpy as np\nimport random\nimport sys\nimport io\npath = get_file('nietzsche.txt', origin=\n 'https://s3.amazonaws.com/text-datasets/nietzsche.txt')\n_hy_anon_var_1 = None\nwith io.open(path, encoding='utf-8') as f:\n text = f.read()\n _hy_anon_var_1 = None\nprint('corpus length:', len(text))\nchars = sorted(list(set(text)))\nprint('total chars (unique characters in input text):', len(chars))\nchar_indices = dict([(last(i), first(i)) for i in enumerate(chars)])\nindices_char = dict([i for i in enumerate(chars)])\nmaxlen = 40\nstep = 3\nsentences = list()\nnext_chars = list()\nprint('Create sentences and next_chars data...')\nfor i in range(0, len(text) - maxlen, step):\n sentences.append(text[i:i + maxlen:None])\n next_chars.append(text[i + maxlen])\nprint('Vectorization...')\nx = np.zeros([len(sentences), maxlen, len(chars)], dtype=np.bool)\ny = np.zeros([len(sentences), len(chars)], dtype=np.bool)\nfor [i, sentence] in [j for j in enumerate(sentences)]:\n for [t, char] in [j for j in enumerate(sentence)]:\n x[i][t][char_indices[char]] = 1\n y[i][char_indices[next_chars[i]]] = 1\nprint('Done creating one-hot encoded training data.')\nprint('Building model...')\nmodel = Sequential()\nmodel.add(LSTM(128, input_shape=[maxlen, len(chars)]))\nmodel.add(Dense(len(chars), activation='softmax'))\noptimizer = RMSprop(0.01)\nmodel.compile(loss='categorical_crossentropy', optimizer=optimizer)\n\n\ndef sample(preds, temperature=1.0):\n preds = np.array(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)\n\n\ndef on_epoch_end(epoch, not_used=None):\n print()\n print('----- Generating text after Epoch:', epoch)\n start_index = random.randint(0, len(text) - maxlen - 1)\n for diversity in [0.2, 0.5, 1.0, 1.2]:\n print('----- diversity:', diversity)\n generated = ''\n sentence = text[start_index:start_index + maxlen:None]\n generated = generated + sentence\n print('----- Generating with seed:', sentence)\n sys.stdout.write(generated)\n for i in range(400):\n x_pred = np.zeros([1, maxlen, len(chars)])\n for [t, char] in [j for j in enumerate(sentence)]:\n x_pred[0][t][char_indices[char]] = 1\n preds = first(model.predict(x_pred, verbose=0))\n print('** preds=', preds)\n next_index = sample(preds, diversity)\n next_char = indices_char[next_index]\n sentence = sentence[1:None:None] + next_char\n sys.stdout.write(next_char)\n sys.stdout.flush()\n print()\n\n\nprint_callback = LambdaCallback(on_epoch_end=on_epoch_end)\nmodel.fit(x, y, batch_size=128, epochs=60, callbacks=[print_callback])\n\n","sub_path":"examples_translated_to_python/deeplearning/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"584314348","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport re\n\n\ndef normalize_keys(input: dict, expected: list):\n for key in list(input.keys()):\n for exp_key in expected:\n reg = re.compile('[^a-zA-Z]')\n if reg.sub('', key).lower() == exp_key:\n input[exp_key] = input.pop(key)\n 
break\n","sub_path":"request_handler/app/auxiliary/file_handlers/keys_normalizer.py","file_name":"keys_normalizer.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"245343193","text":"\n\"\"\"Control bit fields for the Flow Control Branch Detector\"\"\"\n\nimport Dyadic_Operators as Dyadic\nfrom bitstring import BitArray\n\n# From Verilog code\ntotal_op_width = 31\n\norigin_enabled = BitArray(\"0b1\")\norigin_disabled = BitArray(\"0b0\")\npredict_taken = BitArray(\"0b1\")\npredict_not_taken = BitArray(\"0b0\")\npredict_enabled = BitArray(\"0b1\")\npredict_disabled = BitArray(\"0b0\")\nA_flag_negative = BitArray(\"0b00\")\nA_flag_carryout = BitArray(\"0b01\")\nA_flag_sentinel = BitArray(\"0b10\")\nA_flag_external = BitArray(\"0b11\")\nB_flag_lessthan = BitArray(\"0b00\")\nB_flag_counter = BitArray(\"0b01\")\nB_flag_sentinel = BitArray(\"0b10\")\nB_flag_external = BitArray(\"0b11\")\n\norigin_width = 10\norigin_enable_width = 1\ndestination_width = 10\npredict_taken_width = 1\npredict_enable_width = 1\nA_flag_width = 2\nB_flag_width = 2\nAB_operator_width = Dyadic.op_width\ncondition_width = A_flag_width + B_flag_width + AB_operator_width\n\n\nassert (origin_width + origin_enable_width + destination_width + predict_taken_width + predict_enable_width + condition_width) == total_op_width, \"ERROR: Branch Detector control word width and sum of control bits widths do not agree\"\n\nif __name__ == \"__main__\":\n print(A_flag_sentinel.bin)\n\n","sub_path":"Assembler/Branch_Detector_Operators.py","file_name":"Branch_Detector_Operators.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"557328088","text":"# Copyright 2020 The Jetstack cert-manager contributors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nload(\"@io_bazel_rules_docker//container:container.bzl\", \"container_pull\")\n\nUBI_BASE_IMAGES = {\n # Pinned to release https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8-minimal/images/8.1-407\n # Ensure you update _all_ image digests when upgrading the UBI base image.\n \"amd64\": {\n \"digest\": \"sha256:39df7365f1343e9a49132f96edd852ddb80e4dcdec03ef8fe1779acb5418d37e\",\n },\n \"arm64\": {\n \"digest\": \"sha256:2166f0122117868485b429170d0848b2da566c20a61e517d44d059c360e2ed2b\",\n \"variant\": \"v8\",\n },\n \"ppc64le\": {\n \"digest\": \"sha256:e55721eb97b2517542b695c3ad36e9534fb8f7a8641d06b2ad87e802e36dd8d2\",\n },\n \"s390x\": {\n \"digest\": \"sha256:d41676554f34c417c82a016c94790179fb4116063547b757ec7a65a52235c9c8\",\n },\n}\n\ndef define_base_images():\n ## Use 'static' distroless image for all builds\n container_pull(\n name = \"static_base\",\n registry = \"gcr.io\",\n repository = \"distroless/static\",\n digest = \"sha256:cd0679a54d2abaf3644829f5e290ad8a10688847475f570fddb9963318cf9390\",\n )\n\n [container_pull(\n name = 
\"com_redhat_access_registry_ubi8_ubi_minimal-%s\" % arch,\n registry = \"registry.access.redhat.com\",\n repository = \"ubi8/ubi-minimal\",\n architecture = arch,\n digest = meta[\"digest\"],\n cpu_variant = meta.get(\"variant\", None),\n ) for arch, meta in UBI_BASE_IMAGES.items()]\n","sub_path":"build/images.bzl","file_name":"images.bzl","file_ext":"bzl","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"97313065","text":"from django.conf.urls import url\nfrom . import views\n\napp_name = 'login'\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^process$', views.process, name='create'),\n # url(r'^success$', views.show),\n # url(r'^logout$', views.logout),\n]\n","sub_path":"Python/django/belt_review2/apps/login/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"168607662","text":"##\n## Programación con Pandas\n## ===========================================================================\n##\n## Construya una tabla que contenga _c1 y una lista\n## separada por ':' de los valores de la columna _c2\n## para el archivo `tbl0.tsv`.\n##\n## Rta/\n## _c0 lista\n## 0 A 1:1:2:3:6:7:8:9\n## 1 B 1:3:4:5:6:8:9\n## 2 C 0:5:6:7:9\n## 3 D 1:2:3:5:5:7\n## 4 E 1:1:2:3:3:4:5:5:5:6:7:8:8:9\n## \n## >>> Escriba su codigo a partir de este punto <<<\n##\nimport pandas\nimport os\nimport re\nimport string\nos.chdir(\"/app/Laboratorios/04-pandas=1/\")\ndatos = pandas.read_csv(\"./q08=1/tbl0.tsv\", sep=\"\\t\")\nletras = pandas.unique(datos[\"_c1\"].values).tolist()\nletras.sort()\ndef asociadas(num_parametro):\n temp = []\n for i in list(range(0,len(datos[\"_c1\"]))):\n if datos.iloc[i, 1] == num_parametro:\n temp.append(datos.iloc[i, 2])\n temp.sort()\n return(temp)\nnumeros = [asociadas(j) for j in letras]\ntabla = [(str(letras[i]), str(numeros[i]).replace(\",\",\":\")) for i in list(range(0,len(letras)))]\ntabla = pandas.DataFrame(tabla, columns=[\"_c0\", \"lista\"])\ntabla[\"lista\"] = [str(tabla.iloc[i, 1]).replace(\"[\", \"\") for i in list(range(0,len(tabla[\"lista\"])))]\ntabla[\"lista\"] = [str(tabla.iloc[i, 1]).replace(\"]\", \"\") for i in list(range(0,len(tabla[\"lista\"])))]\ntabla[\"lista\"] = [str(tabla.iloc[i, 1]).replace(\" \", \"\") for i in list(range(0,len(tabla[\"lista\"])))]\nprint(tabla)","sub_path":"04-pandas=1/q08=1/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"235358486","text":"import sys\nimport re\nimport itertools\nimport gzip\nimport argparse\nimport os\n\n\ndef arg_parser():\n general_parser = argparse.ArgumentParser()\n general_parser.add_argument(\"-i1\", \"--input1\", help=\"Input fastq file 1, gzipped\", required=True)\n general_parser.add_argument(\"-i2\", \"--input2\", help=\"Input fastq file 2, gzipped\", required=True)\n\n general_parser.add_argument(\"-x1\", \"--index1\", help=\"Input fastq with 1 index, gzipped\", required=True)\n general_parser.add_argument(\"-x2\", \"--index2\", help=\"Input fastq with 2 index, gzipped\", required=True)\n\n general_parser.add_argument(\"-o1\", \"--output1\", help=\"Output fastq file 1, gzipped\", default=\"filtered_1.fastq.gz\")\n general_parser.add_argument(\"-o2\", \"--output2\", help=\"Output fastq file 2, gzipped\", default=\"filtered_2.fastq.gz\")\n\n 
general_parser.add_argument(\"-f1\", \"--filter1\", help=\"List of filtering criteria for index 1\", required=True, nargs='+')\n general_parser.add_argument(\"-f2\", \"--filter2\", help=\"List of filtering criteria for index 2\", required=True, nargs='+')\n\n general_parser.add_argument(\"-m\", \"--mismatch\", help=\"Maximum mismatch number in index. Default 1\", type=int, default=1)\n\n general_parser.add_argument(\"-s\", \"--start\", help=\"Start position of the scanning interval. Default 0 (from the beginning)\", type=check_not_negative, default=0)\n general_parser.add_argument(\"-l\", \"--length\", help=\"Length of the scanning interval. Default 999 (till the end)\", type=check_positive, default=999)\n return general_parser\n\n\ndef check_not_negative(value):\n ivalue = int(value)\n if ivalue < 0:\n raise argparse.ArgumentTypeError(\"%s - should be >= 0\" % value)\n return ivalue\n\n\ndef check_positive(value):\n ivalue = int(value)\n if ivalue <= 0:\n raise argparse.ArgumentTypeError(\"%s - should be > 0\" % value)\n return ivalue\n\n\ndef normalize_args(args, skip_list=[]):\n \"\"\"Converts all relative path arguments to absolute ones relatively to the current working directory\"\"\"\n normalized_args = {}\n for key,value in args.__dict__.items():\n if key not in skip_list:\n normalized_args[key] = value if not value or os.path.isabs(value) else os.path.normpath(os.path.join(os.getcwd(), value))\n else:\n normalized_args[key]=value\n return argparse.Namespace (**normalized_args)\n\n\ndef get_compiled_regex(target_patterns, max_mismatch_counts):\n regex_list = []\n for pattern in target_patterns:\n pattern_l = list(pattern)\n combinations = itertools.combinations(range(len(pattern_l)), max_mismatch_counts) \n for combination in combinations:\n pattern_l_copy = pattern_l[:]\n for i in combination:\n pattern_l_copy[i]=\"$\"\n regex_list.append(''.join(pattern_l_copy).replace(\"$\", \".?\"))\n return re.compile(\"|\".join(regex_list), re.IGNORECASE)\n\n\ndef run_filtering(args):\n print(\"Filter input files:\\n\", args.input1, \"\\n\", args.input2)\n print(\"Index files:\\n\", args.index1, \"\\n\", args.index2)\n print(\"Filtering criteria:\\n\", args.filter1, \"\\n\", args.filter2)\n print(\"Max mismatches:\", args.mismatch)\n print(\"Scanning interval ranges\", args.start, args.start+args.length)\n \n compiled_regex1 = get_compiled_regex(args.filter1, args.mismatch)\n compiled_regex2 = get_compiled_regex(args.filter2, args.mismatch)\n total = 0\n with gzip.open(args.input2,'rb') as f2_input_stream:\n for l in f2_input_stream:\n total += 1\n total = int(total / 4)\n print(\"Total number of read:\", total)\n\n count = 0\n with gzip.open(args.input1,'rb') as f1_input_stream:\n with gzip.open(args.input2,'rb') as f2_input_stream:\n with gzip.open(args.index1,'rb') as index1_stream:\n with gzip.open(args.index2,'rb') as index2_stream:\n with gzip.open(args.output1, 'wb') as f1_output_stream:\n with gzip.open(args.output2, 'wb') as f2_output_stream:\n f1_read_data = []\n f2_read_data = []\n index1_data = []\n index2_data = []\n for f1_line, f2_line, index1_line, index2_line in zip(f1_input_stream, f2_input_stream, index1_stream, index2_stream):\n f1_read_data.append(f1_line.decode(\"utf-8\").rstrip())\n f2_read_data.append(f2_line.decode(\"utf-8\").rstrip())\n index1_data.append(index1_line.decode(\"utf-8\").rstrip())\n index2_data.append(index2_line.decode(\"utf-8\").rstrip())\n if len(f1_read_data) == 4 and len(f2_read_data) == 4 and len(index1_data) == 4 and len(index2_data) == 4:\n count 
+= 1\n f1_record = {k: v for k, v in zip(['name', 'sequence', 'strand', 'quality'], f1_read_data)}\n f2_record = {k: v for k, v in zip(['name', 'sequence', 'strand', 'quality'], f2_read_data)}\n index1_record = {k: v for k, v in zip(['name', 'sequence', 'strand', 'quality'], index1_data)}\n index2_record = {k: v for k, v in zip(['name', 'sequence', 'strand', 'quality'], index2_data)}\n scanned1_seq = index1_record[\"sequence\"][args.start:args.start+args.length]\n scanned2_seq = index2_record[\"sequence\"][args.start:args.start+args.length]\n if compiled_regex1.search(scanned1_seq) and compiled_regex2.search(scanned2_seq):\n f1_record[\"name\"] = f1_record[\"name\"].replace(\" \", \"_\") + \"_\" + index1_record[\"sequence\"] + \"_\" + index2_record[\"sequence\"]\n f2_record[\"name\"] = f2_record[\"name\"].replace(\" \", \"_\") + \"_\" + index1_record[\"sequence\"] + \"_\" + index2_record[\"sequence\"]\n f1_output_stream.write((\"\\n\".join([ f1_record[\"name\"], f1_record[\"sequence\"], f1_record[\"strand\"], f1_record[\"quality\"]])+\"\\n\").encode(\"utf-8\"))\n f2_output_stream.write((\"\\n\".join([ f2_record[\"name\"], f2_record[\"sequence\"], f2_record[\"strand\"], f2_record[\"quality\"]])+\"\\n\").encode(\"utf-8\"))\n f1_read_data = []\n f2_read_data = []\n index1_data = []\n index2_data = []\n if count % 10000 == 0:\n print(\"Reads processed:\", count, \"/\", total, \"(\", int(float(count)/float(total)*100) ,\"%\",\")\")\n\n\ndef main(argsl=None):\n if argsl is None:\n argsl = sys.argv[1:]\n args,_ = arg_parser().parse_known_args(argsl)\n args = normalize_args(args, [\"filter1\", \"filter2\", \"mismatch\", \"start\", \"length\"])\n run_filtering(args)\n print(\"Finished successfully\")\n\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv[1:]))","sub_path":"filter_fastq_by_index.py","file_name":"filter_fastq_by_index.py","file_ext":"py","file_size_in_byte":7191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"194121256","text":"import qcfractal.interface as portal\n\n# Build a interface to the server \nclient = portal.FractalClient(\"localhost:7777\", verify=False)\n\n# Add a HOOH\nhooh = portal.data.get_molecule(\"hooh.json\")\n\n# Geometric options\ntdinput = {\n \"initial_molecule\": [hooh],\n \"keywords\": {\n \"dihedrals\": [[0, 1, 2, 3]],\n \"grid_spacing\": [90]\n },\n \"optimization_spec\": {\n \"program\": \"geometric\",\n \"keywords\": {\n \"coordsys\": \"tric\",\n }\n },\n \"qc_spec\": {\n \"driver\": \"gradient\",\n \"method\": \"UFF\",\n \"basis\": None,\n \"keywords\": None,\n \"program\": \"rdkit\",\n },\n}\n\n# Compute!\nret = client.add_service([tdinput])\n\nprint(ret)\n","sub_path":"examples/parsl_torsiondrive/compute_torsion.py","file_name":"compute_torsion.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"403595332","text":"import math\n\ndef readandwritefile(filein, fileout):\n\twith open(filein) as f:\n\t\tlines = f.readlines()\n\t\tnumcases = int(lines[0].rstrip())\n\t\tlinestowrite = []\n\t\tfor i in range(1, (numcases+1)):\n\t\t\tlinestowrite.append(solve(lines[i].rstrip().split()))\n\t\twriteanswer(linestowrite, fileout)\n\t\tf.close()\n\ndef writeanswer(lines, file):\n\twith open(file, 'w+') as f:\n\t\tfor i in range(0 ,len(lines)):\n\t\t\tline = lines[i]\n\t\t\tf.write(\"Case #\" + str(i+1) + \": \" + line)\n\t\t\tif(i+1 < len(lines)):\n\t\t\t\tf.write(\"\\n\")\n\t\tf.close()\n\n\n\n\n\n\ndef 
solve(params):\n\tword = params[0]\n\tentry = ''\n\tfor letter in word:\n\t\tif(entry == ''):\n\t\t\tentry += letter\n\t\telse:\n\t\t\tif(entry[0] <= letter):\n\t\t\t\tentry = letter + entry\n\t\t\telse:\n\t\t\t\tentry = entry + letter\n\treturn entry\n\n\n\n\nreadandwritefile('file.in', 'file.out')","sub_path":"solutions_5631989306621952_0/Python/A13xand3r/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"612728030","text":"from .utils import *\nimport datetime as dt\nimport numpy as np\n\nclass Timeseries:\n def __init__(self):\n self.source = \"\"\n self.data = []\n self.start = dt.datetime.max\n self.timestep = dt.timedelta(days=1)\n\n def __get_count(self):\n return len(self.data)\n\n def __get_end(self):\n return self.start + self.timestep * (self.count - 1)\n\n def __get_min(self):\n if (len(self.data) == 0):\n return self.MISSING_VALUE\n else:\n return min(self.data)\n\n def __get_max(self):\n if (len(self.data) == 0):\n return self.MISSING_VALUE\n else:\n return max(self.data)\n\n def __get_mean(self):\n if (len(self.data) == 0):\n return self.MISSING_VALUE\n else:\n return np.nanmean(self.data)\n\n def __get_sum(self):\n return np.nansum(self.data)\n\n def __get_std(self):\n if (len(self.data) == 0):\n return self.MISSING_VALUE\n else:\n return np.nanstd(self.data)\n\n def __get_missing(self):\n return count_missing(self.data)\n\n def __get_nonmissing(self):\n return self.length - self.missing\n\n def __add__(self, other):\n self_clone = self.clone()\n self_clone.add(other)\n return self_clone\n\n def __radd__(self, other):\n answer = self + other\n return answer\n\n def __sub__(self, other):\n answer = self + -1 * other\n return answer\n\n def __rsub__(self, other):\n if not is_a_number(other):\n raise Exception(\"Cannot subtract timeseries from non-number.\")\n answer = self.clone()\n for i in range(answer.length):\n answer.data[i] = other - self.data[i]\n return answer\n\n def __neg__(self):\n self_clone = self.clone()\n for i in range(self.length):\n self_clone.data[i] = -self_clone.data[i]\n return self_clone\n\n def __pow__(self, other):\n if not is_a_number(other):\n raise Exception(\"Cannot apply exponent which is a non-number.\")\n self_clone = self.clone()\n for i in range(self.length):\n self_clone.data[i] = self_clone.data[i]**other\n return self_clone\n\n def __mul__(self, other):\n self_clone = self.clone()\n if is_a_number(other):\n self_clone.scale(other)\n elif isinstance(other, Timeseries):\n self_clone.mul_timeseries(other)\n else:\n raise Exception(\"Dont know how to multiply this object.\")\n return self_clone\n\n def __rmul__(self, other):\n answer = self * other\n return answer\n\n def __div__(self, other):\n return self * other**-1\n\n def __truediv__(self, other):\n return self * other**-1\n\n def clone(self):\n answer = Timeseries()\n answer.source = self.source\n answer.data = self.data.copy()\n answer.start = self.start\n answer.timestep = self.timestep\n return answer\n\n def summary(self):\n print(\"Source: \" + self.source)\n print(\"Start: \" + str(self.start))\n print(\"End: \" + str(self.end))\n print(\"Count: \" + str(self.count))\n print(\"Missing: \" + str(self.missing))\n print(\"Min: \" + str(self.min))\n print(\"Max: \" + str(self.max))\n print(\"Mean: \" + str(self.mean))\n print(\"StdDev: \" + str(self.std))\n\n def scale(self, factor):\n for i in range(len(self.data)):\n self.data[i] = self.data[i] * 
factor\n return self\n\n def add(self, value):\n if is_a_number(value):\n for i in range(len(self.data)):\n self.data[i] = self.data[i] + value\n elif isinstance(value, Timeseries):\n self.add_timeseries(value)\n else:\n raise Exception(\"Dont know how to add this object.\")\n return self\n\n def add_timeseries(self, other):\n new_start = min(self.start, other.start)\n new_end = max(self.end, other.end)\n self.set_start_end([new_start, new_end])\n all_dates = self.get_dates()\n for i in range(self.length):\n o = other.get_value(0, 0, 0, date=all_dates[i])\n self.data[i] += o\n return self\n\n def mul_timeseries(self, other):\n new_start = min(self.start, other.start)\n new_end = max(self.end, other.end)\n self.set_start_end([new_start, new_end])\n all_dates = self.get_dates()\n for i in range(self.length):\n o = other.get_value(0, 0, 0, date=all_dates[i])\n self.data[i] *= o\n return self\n\n def scale_monthly(self, seasonal_factors=None):\n #Resample 12 factors from the provided seasonal_factors list\n monthly_factors = []\n n = len(seasonal_factors)\n if n==1 or n==2 or n==3 or n==4 or n==6 or n==12:\n for i in range(12):\n j = math.ceil(n*(i + 1.0)/12.0) - 1\n monthly_factors.append(seasonal_factors[j])\n else:\n raise Exception(\"seasonal_factors must have length 1, 2, 3, 4, 6, or 12.\")\n dates = self.get_dates()\n for i in range(self.length):\n self.data[i] = self.data[i] * monthly_factors[dates[i].month - 1]\n return self\n\n def get_value(self, year, month, day, date=None):\n if (date == None):\n date = dt.datetime(year, month, day)\n i = int(period_length(self.start, date, self.timestep))\n if (i >= 0 and i < self.count):\n return self.data[i]\n return self.MISSING_VALUE\n\n def set_value(self, value, year, month, day, date=None):\n if (date == None):\n date = dt.datetime(year, month, day)\n i = int(period_length(self.start, date, self.timestep))\n if (i >= 0 and i < self.count):\n self.data[i] = value\n else:\n raise Exception(\"Specified date is outside the timeseries range.\")\n\n def set_value_if_missing(self, value, year, month, day, date=None):\n if (date == None):\n date = dt.datetime(year, month, day)\n i = int(period_length(self.start, date, self.timestep))\n if (i >= 0 and i < self.count):\n if math.isnan(self.data[i]):\n self.data[i] = value\n else:\n raise Exception(\"Specified date is outside the timeseries range.\")\n\n def get_copy_of_data(self):\n copy_of_data = self.data.copy()\n return copy_of_data\n\n def get_dates(self, start=None, end=None, timestep=None):\n if (start == None):\n start = self.start\n if (end == None):\n end = self.end\n if (timestep == None):\n timestep = self.timestep\n dates = []\n d = start\n while d <= end:\n dates.append(d)\n d = d + timestep\n return dates\n\n def get_start_end(self):\n return [self.start, self.end]\n\n def set_start_end(self, start_and_end):\n self.set_start(0, 0, 0, start_and_end[0])\n self.set_end(0, 0, 0, start_and_end[1])\n return self\n\n def set_start(self, year, month, day, date=None):\n if (date == None):\n date = dt.datetime(year, month, day)\n start_offset = int(period_length(self.start, date, self.timestep))\n new_length = max(0, self.length - start_offset)\n append_to_start = max(0, new_length - self.length)\n trim_from_start = max(0, start_offset)\n self.data = [self.MISSING_VALUE] * append_to_start + self.data[trim_from_start:]\n self.start = date\n return self\n\n def set_end(self, year, month, day, date=None):\n if (date == None):\n date = dt.datetime(year, month, day)\n new_length = max(0, 1 + 
int(period_length(self.start, date, self.timestep)))\n append_to_end = max(0, new_length - self.length)\n self.data = self.data[:new_length] + [self.MISSING_VALUE] * append_to_end\n return self\n\n def bias(self, other):\n \"\"\"\n Returns the bias on overlapping data.\n \"\"\"\n count = 0\n stot = 0.0\n otot = 0.0\n for d in self.get_dates():\n s = self.get_value(0, 0, 0, date=d)\n o = other.get_value(0, 0, 0, date=d)\n if not math.isnan(s + o):\n count = count + 1\n stot = stot + s\n otot = otot + o\n if count == 0:\n return math.nan\n return stot / otot\n\n def nse(self, other):\n \"\"\"\n Nash Sutcliffe Efficiency\n https://en.wikipedia.org/wiki/Nash%E2%80%93Sutcliffe_model_efficiency_coefficient\n \"\"\"\n answer = 1 - ((other - self)**2).mean / ((other - other.mean)**2).mean\n return answer\n\n def pearsons_r(self, other):\n \"\"\"\n Pearson's R correlation coefficient\n https://en.wikipedia.org/wiki/Pearson_correlation_coefficient\n \"\"\"\n _self = self + 0 * other #common period\n _other = other + 0 * self #common period\n numerator = ((_self - _self.mean) * (_other - _other.mean)).sum\n denominator = ((_self - _self.mean)**2).sum**0.5 * ((_other - _other.mean)**2).sum**0.5\n answer = numerator / denominator\n return answer\n\n def compare_start(self, other):\n return (self.start == other.start)\n\n def compare_end(self, other):\n return (self.end == other.end)\n\n def compare_timestep(self, other):\n return (self.timestep == other.timestep)\n\n def compare_length(self, other):\n return (self.length == other.length)\n\n def compare_nonmissing(self, other, epsilon=0.0):\n \"\"\"\n Returns true if all non-missing values are also non-missing in the\n other timeseries. Otherwise returns false.\n \"\"\"\n for d in self.get_dates():\n s = self.get_value(0, 0, 0, date=d)\n o = other.get_value(0, 0, 0, date=d)\n if (abs(o - s) > epsilon): #nan's will be false\n return False\n return True\n\n def compare_missing(self, other):\n \"\"\"\n Returns true if all missing values (explicitly marked missing) are\n also missing (explicitly marked missing) from the other series.\n Otherwise returns false.\n \"\"\"\n if (self.start != other.start or self.timestep != other.timestep or self.length != other.length):\n return False\n for i in range(len(self.data)):\n if math.isnan(self.data[i]) ^ math.isnan(other.data[i]):\n return False\n return True\n\n def compare(self, other, epsilon=0.0):\n start_is_same = self.compare_start(other)\n end_is_same = self.compare_end(other)\n timestep_is_same = self.compare_timestep(other)\n length_is_same = self.compare_length(other)\n missing_is_same = self.compare_missing(other)\n nonmissing_is_same = self.compare_nonmissing(other, epsilon=epsilon)\n bias = self.bias(other)\n all_same = (start_is_same and end_is_same and\n timestep_is_same and length_is_same and\n missing_is_same and nonmissing_is_same)\n print(\"All same: \" + str(all_same))\n print(\"Start date is same: \" + str(start_is_same))\n print(\"End date is same: \" + str(end_is_same))\n print(\"Timestep is same: \" + str(timestep_is_same))\n print(\"Length is same: \" + str(length_is_same))\n print(\"Missing are same: \" + str(missing_is_same))\n print(\"Nonmissing are same: \" + str(nonmissing_is_same))\n print(\"Bias: \" + str(bias))\n return [all_same, start_is_same, end_is_same, timestep_is_same,\n length_is_same, missing_is_same, nonmissing_is_same]\n\n def date_of_first_data(self):\n for i in range(self.length):\n if not math.isnan(self.data[i]):\n date = self.start + i * self.timestep\n return 
date\n return None\n\n def date_of_last_data(self):\n l = self.length\n for i in range(l):\n j = l - i - 1\n if not math.isnan(self.data[j]):\n date = self.start + j * self.timestep\n return date\n return None\n\n def infill_merge(self, other):\n if (self.timestep != other.timestep):\n raise Exception(\"Cannot infill due to differing timesteps.\")\n new_start = min(self.start, other.start)\n new_end = max(self.end, other.end)\n self.set_start_end([new_start, new_end])\n for d in other.get_dates():\n s = self.get_value(0, 0, 0, date=d)\n if math.isnan(s):\n o = other.get_value(0, 0, 0, date=d)\n self.set_value(o, 0, 0, 0, date=d)\n return self\n\n def infill_scale(self, other, factor=None):\n if (self.timestep != other.timestep):\n raise Exception(\"Cannot infill due to differing timesteps.\")\n if factor == None:\n factor = self.bias(other)\n other_clone = other.clone()\n other_clone.scale(factor)\n self.infill_merge(other_clone)\n return self\n\n def infill_scalemonthly(self, other, factors=None):\n if (self.timestep != other.timestep):\n raise Exception(\"Cannot infill due to differing timesteps.\")\n if factors == None:\n factors = self.get_wt93_factors(other)\n other_clone = other.clone()\n other_clone.scale_monthly(factors)\n self.infill_merge(other_clone)\n return self\n\n def infill(self, other, method=\"MERGE\"):\n method = method.upper()\n if (method==\"MERGE\" or method==\"DIRECT\"):\n self.infill_merge(other)\n elif (method==\"SCALE\" or method==\"FACTOR\"):\n self.infill_scale(other)\n elif (method==\"SCALE_MONTHLY\" or method==\"WT93B\"):\n self.infill_scalemonthly(other)\n else:\n raise Exception(\"Undefined infilling method: \" + str(method))\n return self\n\n def infill_wt93b(self, others):\n factors = []\n for i in range(len(others)):\n factors.append(self.get_wt93_factors(others[i]))\n for i in range(len(others)):\n self.infill_scalemonthly(others[i], factors=factors[i])\n return self\n\n def get_wt93_factors(self, other):\n if (self.timestep != other.timestep or self.timestep != dt.timedelta(1)):\n raise Exception(\"WT93 only supports daily timesteps.\")\n #Clone self and other and align their periods\n start = dt.datetime(max(self.start.year, other.start.year), 1, 1)\n end = dt.datetime(min(self.end.year, other.end.year) + 1, 1, 1)\n s = self.clone().set_start_end([start, end])\n o = other.clone().set_start_end([start, end])\n #Calculate totals over corresponding complete months\n d = s.get_dates()\n m = 12\n s_monthly_totals = [0] * 12; o_monthly_totals = [0] * 12\n s_cum = 0; o_cum = 0\n for i in range(len(d)):\n if (m != d[i].month):\n if not math.isnan(s_cum + o_cum):\n s_monthly_totals[m - 1] += s_cum\n o_monthly_totals[m - 1] += o_cum\n s_cum = 0; o_cum = 0\n m = d[i].month\n s_cum += s.data[i]\n o_cum += o.data[i]\n #Calculate factors\n answer = [0] * 12\n for i in range(12):\n answer[i] = s_monthly_totals[i] / o_monthly_totals[i]\n return answer\n\n\n\n\n\n\n MISSING_VALUE = float(\"nan\")\n count = property(__get_count)\n length = property(__get_count)\n end = property(__get_end)\n min = property(__get_min)\n max = property(__get_max)\n mean = property(__get_mean)\n sum = property(__get_sum)\n std = property(__get_std)\n missing = property(__get_missing)\n nonmissing = property(__get_nonmissing)\n","sub_path":"pixiepython/timeseries.py","file_name":"timeseries.py","file_ext":"py","file_size_in_byte":15755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"278736548","text":"# 一个通用的发送get请求的模板函数\n# 
Uses the retrying module to retry requests on timeout\n# Adds POST request support\n# Adds proxy support\n# Allows skipping SSL verification\nimport requests\nfrom retrying import retry\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36\"}\n\n\n# Retry at most 10 times\n@retry(stop_max_attempt_number=10)\ndef _parse_url(url, method, data, proxies, verify):\n # print(\"*\" * 50) # check whether retry actually issued multiple requests\n if method.lower() == \"get\":\n response = requests.get(url, headers=headers, timeout=3, proxies=proxies, verify=verify)\n elif method.lower() == \"post\":\n response = requests.post(url, data=data, headers=headers, timeout=3, proxies=proxies, verify=verify)\n else:\n raise NotImplementedError(\"{} method is not implemented yet\".format(method))\n assert response.status_code == 200\n return response.content.decode()\n\n\ndef parse_url(url, method=\"GET\", data=None, proxies=None, verify=True):\n try:\n html_str = _parse_url(url, method, data, proxies, verify)\n except Exception as e:\n print(e)\n html_str = None\n return html_str\n\n\nif __name__ == '__main__':\n url = \"http://www.baidu.com\"\n data = {\"from\": \"zh\",\n \"to\": \"en\",\n \"token\": \"3018ae176904de63297751917421d1f7\",\n }\n proxies = {\n \"http\": \"http://180.183.27.225:8080\"\n }\n # print(parse_url(url))\n # print(parse_url(url, method=\"POST\", data=data, proxies=proxies))\n # print(parse_url(url, verify=False))\n print(parse_url(url, method=\"PULL\", data=data, proxies=proxies, verify=True))\n","sub_path":"spider/day03/04_parse_url_跳过ssl验证.py","file_name":"04_parse_url_跳过ssl验证.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"332458076","text":"\"\"\"\nThis is a binning method using principal component analysis\nand the BAG algorithm outlined in binning_as_clustering.ipynb\n\nEvery classifier module needs to:\n - have construction of the type \n __init__ (self, bands, options) (see examples below)\n - implement two functions: \n train (self, training_data,training_z)\n apply (self, data).\n - define valid_options class variable.\n\nSee Classifier Documentation below.\n\"\"\"\n\nfrom .base import Tomographer\nimport numpy as np\n\nimport jax\nimport jax.numpy as jnp\nfrom jax.core import UnexpectedTracerError\nfrom jax import config\nconfig.update(\"jax_enable_x64\", True)\n\nfrom tomo_challenge.jax_metrics import compute_snr_score, compute_fom\n\nclass PCACluster(Tomographer):\n \"\"\" PCA based clustering algorithm \"\"\"\n \n # valid parameter -- see below\n valid_options = [\"bins\", \"metric\", \"verbose\", \"buzzard\"]\n # this setting means arrays will be sent to train and apply instead\n # of dictionaries\n wants_arrays = False\n \n def __init__ (self, bands, options):\n \"\"\"Constructor\n \n Parameters:\n -----------\n bands: str\n string containing valid bands. PCACluster is designed for griz with colors and errors\n options: dict\n options come through here. Valid keys are listed as valid_options\n class variable. \n\n Note:\n -----\n Valid options are:\n \"bins\" - number of tomographic bins\n \"metric\" - the metric to optimize for, one of {\"SNR\", \"FOM\", \"FOM_DETF\"}\n \"verbose\" - Whether to print verbosely. This prints continual training updates. 
\n\n \"\"\"\n self.bands = bands\n self.opt = options\n \n self.eigs = [] # To store eigen vectors from training to use during testing\n self.centroids = [] # Centroids for classification purposes.\n\n def train (self, training_data, training_z):\n \"\"\"Trains the classifier\n \n Parameters:\n -----------\n training_data: dict, size Ngalaxes x Nbands\n training data, each row is a galaxy, each column is a band as per\n band defined above\n training_z: numpy array, size Ngalaxies\n true redshift for the training sample\n\n \"\"\"\n impl = self.opt[\"metric\"].lower()\n verbose = self.opt[\"verbose\"]\n num_centroids = self.opt[\"bins\"]\n buzzard = self.opt[\"buzzard\"] if \"buzzard\" in self.opt.keys() else False\n \n # Gets the color data and the errors which we will use for weights.\n color_data = []\n f = [\"r\", \"gr\", \"ri\", \"rz\"] if self.bands == \"griz\" else [\"r\", \"ri\", \"rz\"]\n for c in f:\n color_data.append(training_data[c])\n color_data = np.asarray(color_data).T\n errs = training_data[\"r_err\"].reshape(-1, 1)\n \n # Converts the errors to weights by using 1/err^2\n # Since errors very close to 0 blow up, this code will set errors\n # below the threshold to 1 and scale everything s.t. the weights\n # are in the range (0, 1]\n err_thresh = 0.01\n err_cond = errs >= err_thresh\n weights = np.where(errs < err_thresh, 1, 1/errs**2)\n weights[err_cond] = weights[err_cond] / np.max(weights[err_cond])\n \n # Make the mean zeroish by subtracting weighted mean then find the covariance matrix\n # divde by len-1 because sample and not population covariance\n # Not that at this size this is going to matter though.\n color_shifted = color_data - np.average(color_data[:,0].reshape(-1, 1), weights=weights)\n cov = color_shifted.T @ color_shifted / (color_shifted.shape[0] - 1)\n \n # My own implementation of finding a PCA eigenvector with weights\n # using an EM-based algorithm\n def find_eigenvector(data, weights=None):\n # Start with position 1985. 1985 is the year my favourite\n # movie came out. No other reason than that. \n phi = data[1985].reshape(1, -1)\n\n if weights is None:\n weights = np.ones_like(phi)\n\n thresh = 1e-6\n cond = False\n i = 0\n while not cond:\n # Find the coefficients that match the eigen vector to the data vector\n coeffs = data @ phi.T\n\n # Project the data along phi axis by multiplying the data by the coefficient\n proj = data * coeffs * weights\n\n # Sum all the projected ones to find the new eigenvector and then divide by the\n # length of the vector to reduce it to unit vector length.\n phi_new = np.sum(proj, axis=0)\n phi_new = phi_new / np.linalg.norm(phi_new)\n\n # If all of the dimensions changes by less than thresh then the\n # condition is set to true and the loop breaks\n cond = np.all((phi_new - phi) < thresh)\n\n phi = phi_new.reshape(1, -1)\n i += 1\n return phi\n \n # We here find the eigenvectors. 
We only need two since I'll be working in 2-D\n num_eigs = 3\n eigs = np.zeros((color_shifted.shape[1], num_eigs))\n temp_data = np.copy(color_shifted)\n if verbose: print(\"Finding eigenvectors for dimensionality reduction\")\n for i in range(num_eigs):\n v = find_eigenvector(temp_data, None)\n eigs[:,i] = (v)\n\n # Subtract the projections of the found eigen vector to start finding the next one.\n coeffs = temp_data @ v.T\n temp_data = temp_data - coeffs * v\n \n # The selection of principal axis vectors that will reduce dimensionality\n self.eigs = eigs\n data_reduced = color_data @ self.eigs\n \n # I took this cut from the random forest example.\n # I cut after doing the PCA in case the cut changes the\n # principal axes and I want to avoid that.\n np.random.seed(1985) # To ensure this is the same every time.\n cut = np.random.uniform(0, 1, data_reduced.shape[0]) < 0.05\n data_cut = data_reduced[cut]\n z_cut = training_z[cut]\n \n # These next \"few\" lines are helper functions for the training.\n @jax.jit\n def softmax(x, beta=1):\n return jnp.exp(beta * x) / jnp.sum(jnp.exp(beta * x), axis=0)\n\n @jax.jit\n def dist(points, centroids, beta=1):\n # Finds the distance between the points and the centroids\n dist = []\n for center in centroids:\n shift = points - center\n dist.append(jnp.linalg.norm(shift, axis=1))\n\n # Converting to numpy array so we can use boolean indexing\n dist = jnp.asarray(dist)\n\n # Which category these would be assigned to based on their distances\n # soft min, don't have to one_hot then and the gradient should work.\n return softmax(-dist, beta).T \n\n # This is by far not the best way to do this but alas...\n @jax.jit\n def dist_snr(points, centroids, z, beta=1):\n cat = dist(points, centroids, beta)\n return -compute_snr_score(cat, z, binned_nz=True)\n\n @jax.jit\n def dist_fom(points, centroids, z, beta=1):\n cat = dist(points, centroids, beta)\n return -compute_fom(cat, z, binned_nz=True)\n \n @jax.jit\n def dist_fom_detf(points, centroids, z, beta=1):\n cat = dist(points, centroids, beta)\n return -compute_fom(cat, z, inds=[5, 6], binned_nz=True)\n \n def get_equality_centroids(data, redshift, n_bins=3):\n # Find the edges that split the redshifts into n_bins bins of\n # equal number counts in each\n p = np.linspace(0, 100, n_bins + 1)\n z_edges = np.percentile(redshift, p)\n\n training_bin = np.zeros_like(data[:, 0])\n\n # Now find all the objects in each of these bins\n for i in range(n_bins):\n z_low = z_edges[i]\n z_high = z_edges[i + 1]\n training_bin[(redshift > z_low) & (redshift <= z_high)] = i\n\n centroids = []\n for i in range(0, int(training_bin.max()) + 1):\n cond = training_bin == i\n centroids.append(data[cond].mean(axis=0))\n\n return np.asarray(centroids)\n \n # points = data_cut\n # redshift = z_cut\n beta = np.ones(1) * num_centroids\n if verbose: print(f\"Using beta: {beta}\")\n\n # The function we're optimizing, can't use string inputs in functions \n # we're differentiating, hence this. 
The name 'd2' is a historical artifact\n # from when I was implementing various distance functions named dist, d2, and d3.\n if impl == \"fom\":\n d2 = dist_fom\n elif impl == \"fom_detf\":\n d2 = dist_fom_detf\n else:\n d2 = dist_snr\n\n # Technically this is the actual training loop but I call it twice\n # hence the abstraction to a function.\n def loop_and_improve(val, num_epochs):\n to_improve = np.copy(val)\n val_history = []\n\n # These top and bottom are designed for DETF.\n # Need to reduce by factor of ~10^1 for SNR and\n # ~10^2 for FOM, since DETF is order ~10, SNR ~10^2, FOM ~10^3\n top = -1.5\n bottom = -3\n\n if impl == \"fom\": \n top -= 2\n bottom -= 2\n elif impl == \"snr\":\n top -= 1\n bottom -= 1\n elif buzzard and impl == \"fom_detf\":\n top += 1\n\n lr_arr = np.logspace(bottom, top, num_epochs // 2) * 2.5\n lr_arr = np.concatenate([lr_arr, np.flip(lr_arr, 0)])\n\n # Do this first so we know where we start.\n val, grads = jax.value_and_grad(d2, 1)(data_cut, to_improve, z_cut, beta)\n print(f\"Starting {impl.upper()}: {-val}\")\n best = np.copy(to_improve)\n best_score = val\n\n # Terminate at the number of epochs or if the change \n # in snr is too small to be meaningful. We also force \n # a minimum of (num epochs // 3) epochs.\n i = 0\n delta_val = 1\n min_change = 0.25\n while (i < num_epochs and abs(delta_val) > min_change) or i < num_epochs // 3:\n try:\n cur_lr = lr_arr[i]\n if verbose: print(f\"Epoch {i + 1} LR {np.round(cur_lr, 6)}\")\n to_improve += -(grads) * cur_lr\n\n # Finding the resultang value and then the grad for the next epoch.\n val, grads = jax.value_and_grad(d2, 1)(data_cut, to_improve, z_cut, beta)\n\n if verbose: print(f\"{impl.upper()}: {-val}\")\n val_history.append(val)\n\n # Storing the best found score in case we jump out of the minimum\n # (possible... if not likely in some situations)\n if val < best_score:\n best = np.copy(to_improve)\n best_score = val\n\n i += 1\n if len(val_history) > 1:\n delta_val = val - val_history[-2]\n if verbose: print(f\"Delta {impl.upper()}: {-delta_val}\")\n\n except UnexpectedTracerError:\n # I swear this isn't my fault.\n if verbose: print(\"Tracer Error, retrying epoch\")\n continue\n\n return (best, best_score, val_history)\n\n # This finds the mean in all directions then organizes\n # the starting centroids in an equally spaced circle around the x-y center.\n cent = np.mean(data_cut, axis=0)\n rad_diff = 2 * np.pi / num_centroids\n l = []\n for i in range(num_centroids):\n p = 0.15 * np.asarray([np.cos(i * rad_diff), np.sin(i * rad_diff), 0])\n l.append(p + cent)\n\n # Run twice with two different starting centroids and use the best one.\n num_epochs = 100\n if verbose: print(\"\\nStart 1\")\n centroids = np.asarray(l)\n c1, score1, _ = loop_and_improve(centroids, num_epochs)\n\n if verbose: print(\"\\nStart 2\")\n centroids = get_equality_centroids(data_cut, z_cut, num_centroids)\n c2, score2, _ = loop_and_improve(centroids, num_epochs)\n\n if score1 < score2:\n if verbose: print(f\"Circular start used. {-score1} > {-score2}\")\n self.centroids = c1\n else:\n if verbose: print(f\"Equality start used. 
{-score2} > {-score1}\")\n self.centroids = c2\n\n \n # In theory for maximum speed you could jit compile this, since it's\n # all pure jax numpy math.\n def apply (self, data):\n \"\"\"Applies training to the data.\n \n Parameters:\n -----------\n Data: numpy array, size Ngalaxes x Nbands\n testing data, each row is a galaxy, each column is a band as per\n band defined above\n\n Returns: \n tomographic_selections: numpy array, int, size Ngalaxies\n tomographic selection for galaxies return as bin number for \n each galaxy.\n \"\"\"\n \n data_valid = []\n f = [\"r\", \"gr\", \"ri\", \"rz\"] if self.bands == \"griz\" else [\"r\", \"ri\", \"rz\"]\n for c in f:\n data_valid.append(data[c])\n data_valid = np.asarray(data_valid).T\n data_valid_r = data_valid @ self.eigs\n \n # Finds the distance between the points and the centroids\n dist = []\n for center in self.centroids:\n shift = data_valid_r - center\n dist.append(jnp.linalg.norm(shift, axis=1))\n\n # Converting to numpy array so we can use axis for argmin.\n dist = jnp.asarray(dist)\n\n # Which category these would be assigned to based on their distances\n return jnp.argmin(dist, axis=0)\n","sub_path":"tomo_challenge/classifiers/pca_cluster.py","file_name":"pca_cluster.py","file_ext":"py","file_size_in_byte":14072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"206453689","text":"\"\"\"\nSimple script to measure performance of the read_evidence module.\n\nUsage:\n %(prog)s /path/to/reads.bam ... --num-loci-per-region X\n\nExample:\n %(prog)s my.bam 8:101723999-101725999 --num-loci-per-region 2000\n\n\"\"\"\nimport argparse\nimport sys\nimport random\nimport time\n\nimport varcode\nimport varcode.read_evidence\nfrom varcode.locus import Locus\n \nPARSER = argparse.ArgumentParser(usage=__doc__)\nPARSER.add_argument(\"bam_path\")\nPARSER.add_argument(\"regions\", nargs=\"+\")\nPARSER.add_argument(\"--num-loci-per-region\", type=int, default=100)\n\ndef parse_region(s):\n (contig, rest) = s.split(\":\")\n (start, end) = rest.split(\"-\")\n return varcode.Locus.from_inclusive_coordinates(\n contig, int(start), int(end))\n\ndef go(argv):\n args = PARSER.parse_args(argv)\n\n loci_regions = [parse_region(s) for s in args.regions]\n\n loci = []\n for region in loci_regions:\n new_loci = random.sample(\n range(region.start, region.end), args.num_loci_per_region)\n loci.extend(\n Locus.from_inclusive_coordinates(region.contig, locus)\n for locus in new_loci)\n\n print(\"Loading pileups for %d loci.\" % len(loci))\n\n start = time.time()\n varcode.read_evidence.PileupCollection.from_bam(args.bam_path, loci)\n elapsed = time.time() - start\n\n print(\"Read pileups for %d loci in %f.2 seconds = %f locus / sec\" % (\n len(loci), elapsed, len(loci) / elapsed))\n\nif __name__ == '__main__':\n go(sys.argv[1:])\n","sub_path":"test/read_evidence_performance.py","file_name":"read_evidence_performance.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"231346078","text":"import gym\nfrom gym import spaces\nfrom math import inf\nimport numpy as np\nfrom enum import Enum\n\n\nclass SortingEnv(gym.Env):\n\n metadata = { 'render.modes': [ 'ascii' ] }\n\n def __init__(self, init_list):\n\n self.init_list = init_list\n\n self.reward_range = (-100, 100)\n self.action_space = spaces.Discrete(n=9)\n self.observation_space = spaces.Discrete(n=4608)\n\n def reset(self):\n\n # Data-Specific\n self.i = 0\n self.j = 0\n 
self.k = 0\n self.len = len(self.init_list)\n self.list = self.init_list.copy()\n\n # RP-Specific\n self.update_flags()\n self.last_action = 0 # NOOP\n\n return self.encode_state()\n\n def update_flags(self):\n\n self.ieq0 = (self.i == 0)\n self.jeq0 = (self.j == 0)\n self.ieqlen = (self.i == self.len)\n self.jeqlen = (self.j == self.len)\n self.keq0 = (self.k == 0)\n self.keqlen = (self.k == self.len)\n self.iltj = (self.i < self.j)\n self.jlti = (self.j < self.i)\n self.listigtlistj = (self.i < self.len) and (self.j < self.len) and (self.list[self.i] > self.list[self.j])\n\n def encode_state(self):\n\n return int((self.ieq0 << 0) \\\n + (self.jeq0 << 1) \\\n + (self.ieqlen << 2) \\\n + (self.jeqlen << 3) \\\n + (self.keq0 << 4) \\\n + (self.keqlen << 5) \\\n + (self.iltj << 6) \\\n + (self.jlti << 7) \\\n + (self.listigtlistj << 8) \\\n + (self.last_action << 9))\n\n def step(self, action):\n\n reward = 0\n done = False\n\n # NOOP\n if action == 0:\n raise Exception('NOOP is not permitted!')\n\n # TERMINATE\n elif action == 1:\n\n done = True\n reward = 100 if (self.list == sorted(self.list)) else -100\n\n # INCI\n elif action == 2:\n self.i = min(self.i + 1, self.len)\n\n # INCJ\n elif action == 3:\n self.j = min(self.j + 1, self.len)\n\n # INCK\n elif action == 4:\n self.k = min(self.k + 1, np.iinfo(np.uint64).max)\n\n # SETIZERO\n elif action == 5:\n self.i = 0\n \n # SETJZERO\n elif action == 6:\n self.j = 0\n\n # SETKZERO\n elif action == 7:\n self.k = 0\n\n # SWAP\n elif action == 8:\n\n # Out of bounds exception. Swap not possible.\n if (self.i >= self.len) or (self.j >= self.len):\n reward = -10\n\n # Swap is possible.\n else:\n\n self.list[self.i], self.list[self.j] = self.list[self.j], self.list[self.i]\n\n # ..., list_i, ..., list_j, ...\n if self.i < self.j:\n reward = 10 if (self.list[self.i] < self.list[self.j]) else -10\n\n # ..., list_j, ..., list_i, ...\n elif self.j < self.i:\n reward = 10 if (self.list[self.j] < self.list[self.i]) else -10\n\n # ..., list_i/list_j, ... 
(swap has no effect)\n else:\n reward = -10\n \n self.last_action = action\n self.update_flags()\n\n return self.encode_state(), reward, done, {}\n\n def pretty_last_action(self):\n\n return [\n 'NOOP',\n 'TERMINATE',\n 'INCI',\n 'INCJ',\n 'INCK',\n 'SETIZERO',\n 'SETJZERO',\n 'SETKZERO',\n 'SWAP'\n ][self.last_action]\n\n def render(self, mode='ascii'):\n\n print(f\"Data-Specific:\")\n print(f\"i = {self.i}, j = {self.j}\")\n print(f\"k = {self.k}, len = {self.len}\")\n print(f\"list = {self.list}\")\n print(f\"RP-Specific:\")\n print(f\"i {'=' if self.ieq0 else '!='} 0, j {'=' if self.jeq0 else '!='} 0\")\n print(f\"i {'=' if self.ieqlen else '!='} len, j {'=' if self.jeqlen else '!='} len\")\n print(f\"k {'=' if self.keq0 else '!='} 0, k {'=' if self.keqlen else '!='} len\")\n print(f\"i {'<' if self.iltj else '>='} j, j {'<' if self.jlti else '>='} i\")\n print(f\"list[i] {'>' if self.listigtlistj else '<='} list[j]\")\n print(self.pretty_last_action())\n","sub_path":"gym-sorting/gym_sorting/envs/sorting_env.py","file_name":"sorting_env.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"33019217","text":"# 1450번 냅색문제\n\ndef a_brute_force(l, w):\n if l >= len(a_weight):\n a_sum.append(w)\n return\n\n a_brute_force(l + 1, w)\n a_brute_force(l + 1, w + a_weight[l])\n\n\ndef b_brute_force(l, w):\n if l >= len(b_weight):\n b_sum.append(w)\n return\n\n b_brute_force(l + 1, w)\n b_brute_force(l + 1, w + b_weight[l])\n\n\ndef lower_bound(start, end, key):\n global cnt\n\n while start < end:\n mid = (start + end) // 2\n\n if b_sum[mid] <= key:\n start = mid + 1\n else:\n end = mid\n return end\n\n\nn, c = map(int, input().split())\nweight = list(map(int, input().split()))\ncnt = 0\n\na_weight = weight[:n // 2]\nb_weight = weight[n // 2:]\n\na_sum = []\nb_sum = []\na_brute_force(0, 0)\nb_brute_force(0, 0)\nb_sum.sort()\n\nfor i in a_sum:\n if c - i < 0:\n continue\n cnt += (lower_bound(0, len(b_sum), c - i))\n\nprint(cnt)\n","sub_path":"Python/1450.py","file_name":"1450.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"566493690","text":"\nfrom sklearn import preprocessing, svm\nimport pandas as pd\nimport numpy as np\nfrom keras.utils import np_utils\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.models import Sequential\nfrom keras.layers import Activation, Dense, Dropout\nfrom sklearn.preprocessing import MultiLabelBinarizer ,LabelEncoder\nimport sklearn.datasets as skds\nfrom pathlib import Path\nfrom scipy.io import arff\nfrom sklearn.preprocessing import MinMaxScaler,StandardScaler\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import classification_report,confusion_matrix,accuracy_score\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nfrom keras.callbacks import EarlyStopping\nimport keras\nfrom time import time\n \nclass TimingCallback(keras.callbacks.Callback):\n def __init__(self):\n self.logs=[]\n def on_epoch_begin(self, epoch, logs={}):\n self.starttime=time()\n def on_epoch_end(self, epoch, logs={}):\n self.logs.append(time()-self.starttime)\n \n \nseed = 7\nnp.random.seed(seed)\n\ndata = arff.loadarff('charlev+extremism.arff')\ndt = pd.DataFrame(data[0])\n\ndt_label_class = 
dt['classification'].astype(float)\n\ndt_features = dt.iloc[:,1:72].apply(np.ceil)\n\ntrain_x,test_x ,train_y,test_y = train_test_split(dt_features ,dt_label_class, test_size= 0.3 , random_state= 1, stratify= dt_label_class) #\n\nscaler = StandardScaler()\n\nscaler.fit(train_x)\nX_train = scaler.transform(train_x)\nX_test = scaler.transform(test_x)\n\nnum_labels = 3\nvocab_size = 71\n# define baseline model\ndef baseline_model():\n # create model\n model = Sequential()\n model.add(Dense(512, input_shape=(vocab_size,)))\n model.add(Activation('relu'))\n model.add(Dropout(0.3))\n model.add(Dense(512)) \n model.add(Activation('relu'))\n model.add(Dropout(0.3))\n model.add(Dense(num_labels))\n model.add(Activation('softmax'))\n model.summary()\n # Compile model\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\n\n# Fit the model\nestimator = KerasClassifier(build_fn=baseline_model, nb_epoch=10, batch_size=10, verbose=0)\n\n\n#kfold = KFold(n_splits=5, shuffle=True, random_state=seed)\n\n#results = cross_val_score(estimator, X_train, train_y, cv=kfold)\n#print(\"\\nOverall Validation accuracy: %.2f%% (%.2f%%)\" % (results.mean() * 100, results.std() * 100))\n\n# build the neural network from all the training set\n#history = estimator.fit(X_train, train_y)\ncb = TimingCallback()\nes = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=50)\n\nhistory = estimator.fit(X_train, train_y,validation_split=0.33,epochs=10,verbose=0,callbacks=[cb, es])\npredictions = estimator.predict(X_test)\nprint(\"Training Time For each Epoch\")\nprint(cb.logs)\nprint(\"=======================================================================\")\n# list all data in history\n\n\nplt.plot(history.history['acc'])\nplt.plot(cb.logs)\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('time')\nplt.legend(['accuracy', 'time'], loc='upper left')\nplt.show()\n\n\n\n# summarize history for accuracy\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n\n# summarize history for loss\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n\n# build the confusion matrix after classifing the test data\n\n\nprint(accuracy_score(test_y, predictions) * 100)\nprint(classification_report(test_y,predictions))\n\ncm = confusion_matrix(test_y, predictions)\nprint(\"\\nThe confusion matrix when apply the test set on the trained nerual network:\\n\", cm)","sub_path":"POSIT Application/posit DNN/wordcharDNN.py","file_name":"wordcharDNN.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"312502012","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\nimport os\nimport yaml\n\nimport batch\nimport unet #UNETをimportします\n\n# np.random.seed(1919114)\n# tf.set_random_seed(1919114)\n\n# yaml形式の設定を読み込む\nf = open(\"settings.yml\", encoding='UTF-8')\nsettings = yaml.load(f)\n\nwith open('data/image572', 'rb') as f:\n image_x = pickle.load(f)\n\nwith open('data/nucleus_label', 'rb') as f:\n image_t = pickle.load(f)\n\nimage_x, image_t = batch.shuffle_image(image_x, image_t)\n\n#後で消す\nprint(len(image_x))\nnum_data = 
settings[\"num_data\"] ##訓練用データ数\nnum_test = settings[\"num_test\"]\ntrain_x = image_x[:num_data]\ntest_x = image_x[num_data:num_data + num_test]\ntrain_t = image_t[:num_data]\ntest_t = image_t[num_data:num_data + num_test]\n\n#UNETを初期化しています。\nunet = unet.UNET(settings[\"input_sizex\"], settings[\"input_sizey\"], settings[\"num_class\"], depth = settings[\"depth\"], layers_default = settings[\"layers_default\"])\n\nBatch_x = batch.Batch(train_x)\nBatch_t = batch.Batch(train_t)\nBatch_num = settings[\"Batch_num\"] ##バッチ数\n\n\ni = 0\nfor _ in range(settings[\"learning_times\"]): ##学習回数\n i += 1\n batch_x = Batch_x.next_batch(Batch_num)\n batch_t = Batch_t.next_batch(Batch_num)\n unet.sess.run(unet.train_step,\n feed_dict={unet.x:batch_x, unet.t:batch_t, unet.keep_prob:settings[\"keep_prob\"]})\n if i % 10 == 0:\n summary, loss_val, acc_val = unet.sess.run([unet.summary, unet.loss, unet.accuracy],\n feed_dict={unet.x:test_x,\n unet.t:test_t,\n unet.keep_prob:1.0})\n print ('Step: %d, Loss: %.12f, Accuracy: %.12f'\n % (i, loss_val, acc_val))\n\n if os.path.isdir('saver') == False:\n os.mkdir('saver')\n unet.saver.save(unet.sess, os.path.join(os.getcwd(), 'saver/tmp/unet_session'), global_step=i)\n unet.writer.add_summary(summary, i)\n\n\nnum_image = settings[\"num_image\"]\noutput_sizex = unet.output_sizex\noutput_sizey = unet.output_sizey\nnum_class = unet.num_class\n\ntimage = np.array(unet.sess.run([unet.tout], feed_dict = {unet.x:test_x, unet.t:test_t, unet.keep_prob:1.0}))\ntimage = timage.reshape(-1, output_sizex, output_sizey, num_class)\nresult = np.array(unet.sess.run([unet.result], feed_dict = {unet.x:test_x, unet.t:test_t, unet.keep_prob:1.0}))\nresult = result.reshape(-1, output_sizex, output_sizey, num_class)\nresult_image = np.zeros(result.size).reshape(-1, output_sizex, output_sizey, num_class)\n\nfor i in range(len(result)):\n for j in range(output_sizex):\n for k in range(output_sizey):\n at = np.argmax(result[i][j][k])\n result_image[i][j][k][at] = 1\n\nfig = plt.figure(figsize = (12, 60))\nfor i in range(num_image):\n subplot = fig.add_subplot(num_image, 3, i * 3 + 1)\n subplot.imshow(timage[i,...,0], cmap = 'gray')\n subplot = fig.add_subplot(num_image, 3, i * 3 + 2)\n subplot.imshow(result_image[i,...,0], cmap = 'gray')\n subplot = fig.add_subplot(num_image, 3, i * 3 + 3)\n subplot.imshow(test_x[i,:,:], cmap = 'gray')\n\nplt.show()\n","sub_path":"train_unet.py","file_name":"train_unet.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"562059336","text":"from math import sin, cos, atan, fabs, degrees, pi\nimport numpy as np\n\nclass Jacobi2:\n def __init__(self, matriz, n, erro):\n self.a = matriz\n self.tam = n\n self.erro = erro\n\n\n def metodo(self):\n P = np.identity(self.tam, dtype=float)\n J = np.identity(self.tam, dtype=float)\n np.set_printoptions(suppress=True)\n A_velha = self.a\n A_nova = self.a\n lamb = [0] * self.tam\n val = 100\n\n while(val > self.erro):\n A_nova, J = self.varreduraJacobi(A_velha)\n A_velha = A_nova\n P = P.dot(J)\n val = self.soma_quadrados(A_nova)\n\n for i in range(self.tam):\n lamb[i] = A_nova[i][i]\n\n return P, lamb, A_nova\n\n \n def varreduraJacobi(self, A):\n J = np.identity(self.tam, dtype=float)\n A_velha = A\n A_nova = A\n \n \n for j in range(0, self.tam-1):\n for i in range(j+1, self.tam):\n J_ij = self.matrizJacobi(A_velha, i, j)\n aux1 = np.transpose(J_ij).dot(A_velha)\n A_nova = aux1.dot(J_ij)\n print(\"Matriz A_nova da 
iteração ({},{})\\n\".format(i, j))\n print(self.padronizar(A_nova))\n print(\"\\n\")\n A_velha = A_nova\n aux2 = J\n J = aux2.dot(J_ij)\n \n return A_nova, J\n\n\n def matrizJacobi(self, A, i, j):\n J_ij = np.identity(self.tam, dtype=float)\n ang = 0\n erro = self.erro\n\n if(fabs(A[i][j]) <= erro):\n return J_ij\n \n if(fabs(A[i][i] - A[j][j]) <= erro):\n ang = pi/4\n else:\n ang = (1/2)*atan((-2*A[i][j])/(A[i][i]-A[j][j]))\n \n J_ij[i][i] = cos(ang)\n J_ij[j][j] = cos(ang)\n J_ij[i][j] = sin(ang) \n J_ij[j][i] = -sin(ang)\n\n return J_ij\n\n\n def soma_quadrados(self, A):\n soma = 0\n for j in range(0, self.tam-1):\n for i in range(j+1, self.tam):\n soma = soma + (A[i][j]*A[i][j])\n \n return soma","sub_path":"trabalho14_mn2/jacobi2.py","file_name":"jacobi2.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"555524668","text":"import sys\nsys.stdin = open('input.txt')\n\nn = int(input())\n\ncheck = [0,0]+[1]*(n-1)\nend = int(n**0.5)+1\n\nsosu = []\nfor i in range(2,end):\n if(check[i]):\n for j in range(2*i,n+1,i):\n if(check[j]):\n check[j] = 0\nfor i in range(n+1):\n if(check[i]):\n sosu.append(i)\n\nlength = len(sosu)\ns = 0\ne = 0\ncnt = 0\nresult = 0\n\nwhile(True):\n if(result >= n):\n result -= sosu[s]\n s += 1\n elif(e == length):\n break\n else:\n result += sosu[e]\n e += 1\n\n if(result == n):\n cnt += 1\n\nprint(cnt)","sub_path":"python daily coding/2020.11.09 (정수론)/1644 (소수의 연속합).py","file_name":"1644 (소수의 연속합).py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"178041745","text":"import collections\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\n\nclass SENet_Digit(object):\n def __init__(self, is_training, num_classes,cardinality,reduction_ratio):\n self.num_classes = num_classes\n self._is_training = is_training\n self.cardinality=cardinality\n self.reduction_ratio=reduction_ratio\n\n\n def preprocess(self, inputs):\n # ResNet does not need any input preprocessing for now\n # preprocessed_inputs = tf.to_float(inputs)\n # preprocessed_inputs = tf.subtract(preprocessed_inputs, 128.0)\n # preprocessed_inputs = tf.div(preprocessed_inputs, 128.0)\n return inputs\n\n def SE_Moudle(self,inputs):\n num_channels=inputs.get_shape().as_list()[-1]\n\n with tf.variable_scope(\"SE_Moudle\"):\n #Squeeze: global average pooling gathers global information\n moudle = tf.reduce_mean(inputs,axis=[1,2],keepdims=True,name=\"global_avg_pooling\")\n\n #Excitation: 1x1 convolutions stand in for fully connected layers, cutting the parameter count without changing the compute cost\n #The FC stage yields a weight for every channel of the feature map\n #This is the core of SENet\n moudle = slim.convolution2d(moudle,num_outputs=num_channels/self.reduction_ratio,\n kernel_size=1,stride=1,normalizer_fn=None,scope=\"dim_decrease\")\n\n channel_weights = slim.convolution2d(moudle,num_outputs=num_channels,kernel_size=1,\n stride=1,activation_fn=tf.nn.sigmoid,normalizer_fn=None,\n scope=\"dim_increase\")\n\n scale = inputs * channel_weights\n\n return scale\n\n def ResNeXt_bottleneck_MultiBranch(self,inputs,bottleneck_depth,stride):\n branchs=[]\n for branch_id in range(self.cardinality):\n with tf.variable_scope(\"branch_{}\".format(branch_id)):\n branch = slim.convolution2d(inputs,num_outputs=bottleneck_depth,\n kernel_size=1,stride=1,scope=\"1x1conv\")\n branch = slim.batch_norm(branch,scope=\"bn1\")\n\n branch = slim.convolution2d(branch,num_outputs=bottleneck_depth,\n kernel_size=3,stride=stride)\n branch = slim.batch_norm(branch, scope=\"bn2\")\n branchs.append(branch)\n\n concat_bottleneck = 
tf.concat(branchs,axis=3,name=\"concat\")\n return concat_bottleneck\n\n def SE_ResNeXt_unit(self,inputs,unit_output_channel,downsample,scope_name):\n with tf.variable_scope(scope_name):\n input_channel=inputs.get_shape().as_list()[-1]\n stride=2 if downsample else 1 #stride=2 when this ResNeXt unit downsamples\n\n #The cardinality (branch count) is fixed, but the bottleneck channel count keeps changing; the two relate to the unit's output channel count as follows:\n bottleneck_depth=unit_output_channel//2//self.cardinality\n concat_bottleneck=self.ResNeXt_bottleneck_MultiBranch(inputs,bottleneck_depth,stride)\n\n #Apply one more overall transform to the concatenated feature maps\n bottleneck_1x1=slim.convolution2d(concat_bottleneck,num_outputs=unit_output_channel,\n kernel_size=1,stride=1)\n\n #Use ResNeXt as the baseline and apply the SE module on top\n SE_moudle = self.SE_Moudle(bottleneck_1x1)\n\n _input=inputs\n if downsample: #when downsampling, the bottleneck output and the identity-mapping branch differ in feature map size\n _input=slim.avg_pool2d(_input,kernel_size=2,stride=2)\n\n if input_channel != unit_output_channel:\n _input=slim.convolution2d(_input,num_outputs=unit_output_channel,kernel_size=1,stride=1,\n scope=\"matchDim\")\n\n unit_output=tf.nn.relu(SE_moudle+_input,name=\"Addition_relu\")\n return unit_output\n\n #inputs:[batch_size,224,224,3]\n def inference(self, inputs):\n with slim.arg_scope(self.SE_ResNeXt_arg_scope(is_training=self._is_training)):\n net = slim.convolution2d(inputs,num_outputs=64,kernel_size=3,stride=2)\n # net = slim.max_pool2d(net,kernel_size=3,stride=2,padding=\"SAME\")\n\n scope_name_format = \"unit{}_{}\"\n\n for unit_id in range(3):\n net = self.SE_ResNeXt_unit(net, 64, False,\n scope_name_format.format(1,unit_id))\n\n for unit_id in range(4):\n net = self.SE_ResNeXt_unit(net, 128, unit_id == 0,\n scope_name_format.format(2,unit_id))\n\n for unit_id in range(6):\n net = self.SE_ResNeXt_unit(net, 256, unit_id == 0,\n scope_name_format.format(3,unit_id))\n\n print(\"net:\",net)\n\n net_global_pool = tf.reduce_mean(net, [1, 2],name=\"global_pool\",keep_dims=True)\n net = slim.convolution2d(net_global_pool, num_outputs=self.num_classes,\n kernel_size=1, activation_fn=None, normalizer_fn=None, scope=\"full_conv\")\n\n logits = tf.squeeze(net, [1, 2], name='SpatialSqueeze')\n \n return logits\n\n def postprocess(self,logits):\n softmax=tf.nn.softmax(logits)\n classes=tf.cast(tf.argmax(softmax,axis=1),tf.int32)\n return softmax,classes\n\n def loss(self,logits,labels):\n softmax_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits+1e-8,labels=labels),name=\"softmax_loss\")\n tf.add_to_collection(\"Loss\",softmax_loss)\n loss_all=tf.add_n(tf.get_collection(\"Loss\"),name=\"total_loss\")\n return loss_all\n\n def SE_ResNeXt_arg_scope(self,is_training,weight_decay=0.0001,batch_norm_decay=0.95,\n batch_norm_epsilon=1e-5,batch_norm_scale=True):\n batch_norm_params={\n 'is_training':is_training,\n 'decay':batch_norm_decay,\n 'epsilon':batch_norm_epsilon,\n 'scale':batch_norm_scale,\n # 'updates_collections:':tf.GraphKeys.UPDATE_OPS\n }\n\n with slim.arg_scope(\n [slim.convolution2d],\n weights_regularizer=slim.l2_regularizer(weight_decay),\n weights_initializer=slim.variance_scaling_initializer(),\n activation_fn=tf.nn.relu,\n normalizer_fn=slim.batch_norm,\n normalizer_params=batch_norm_params):\n\n with slim.arg_scope([slim.batch_norm],**batch_norm_params) :\n with slim.arg_scope([slim.max_pool2d,slim.avg_pool2d],padding=\"SAME\") as arg_sc:\n return 
arg_sc","sub_path":"Classification/SENet/nets/SENet_Digit.py","file_name":"SENet_Digit.py","file_ext":"py","file_size_in_byte":6718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"190171093","text":"\"\"\"\nA coroutine to compute a running average\n\n >>> coro_avg = averager() # Call averager(), creating a generator object that is primed inside the primer function of the coroutine decorator.\n >>> from inspect import getgeneratorstate\n >>> getgeneratorstate(coro_avg) # getgeneratorstate reports GEN_SUSPENDED, meaning that the coroutine is ready to receive a value.\n 'GEN_SUSPENDED'\n >>> coro_avg.send(10) # You can immediately start sending values to coro_avg: that's the point of the decorator.\n 10.0\n >>> coro_avg.send(30)\n 20.0\n >>> coro_avg.send(5)\n 15.0\n\n\"\"\"\n\nfrom FluentPython.Chapter_16.coroutil import coroutine # Import the coroutine decorator\n\n@coroutine # Apply it to the averager function.\ndef averager(): # The body of the function is exactly the same as coroaverager0.\n total = 0.0\n count = 0\n average = None\n while True:\n term = yield average\n total += term\n count += 1\n average = total/count","sub_path":"FluentPython/Chapter_16/coroaverager1.py","file_name":"coroaverager1.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"554771394","text":"import time\nimport collections\n\nfrom django.http import HttpResponse\nfrom django.utils.simplejson import dumps\nfrom django.template.defaultfilters import timesince_filter, truncatechars\n\nfrom barium.apps.projects.constants import COLORS\nfrom barium.apps.projects.db import db\nfrom barium.apps.projects.db.projects import Project\nfrom barium.apps.projects.db.events import Event\nfrom barium.apps.projects.db.blocks import Block\nfrom barium.apps.projects.db.reports import Report\n\nfrom mongoengine.django.auth import User\nfrom mongoengine.queryset import DoesNotExist\n\n\ndef get_proj(view):\n def wrapper(request, username=None, repo=None):\n # it's already a project\n if isinstance(request, dict) and not any((username, repo)):\n return view(request)\n try:\n user = User.objects.get(username=username)\n except DoesNotExist:\n return HttpResponse('')\n p = db.projects.find_one({'username': user.username, 'repo': repo})\n if p is None:\n return HttpResponse('')\n return view(p)\n return wrapper\n\n\ndef to_json(view):\n def wrapper(*args, **kwargs):\n result = view(*args, **kwargs)\n if isinstance(result, HttpResponse):\n return result\n return HttpResponse(dumps(result, separators=(',', ':')),\n mimetype='application/json')\n return wrapper\n\n\n@to_json\n@get_proj\ndef pie(p):\n r = []\n for letter, c in sorted(Project(p).get_last_report()['letters'].items()):\n r.append({'value': c, 'color': COLORS[letter]})\n return r\n\n\n@to_json\n@get_proj\ndef hotspots(p):\n r = []\n for hotspot in Project(p).get_hotspots():\n event = Event(hotspot)\n block = event.get_block()\n r.append({\n 'letter': event.e['letter'],\n 'complete_name': truncatechars(Block(block).complete_name(), 40),\n 'github_link': event.github_link(block=block),\n })\n return r\n\n\n@to_json\n@get_proj\ndef module_hotspots(p):\n r = {'labels': [], 'datasets': [{'data': [], 'colors': []}]}\n for hotspot in Project(p).get_module_hotspots():\n ds = r['datasets'][0]\n ds['data'].append(hotspot['average_complexity'])\n name = db.paths.find_one({'_id': hotspot['path']})['name']\n if '/' in name:\n label = 
name.rsplit('/', 1)[1][-20:]\n else:\n label = name[-20:]\n r['labels'].append(label)\n ds['colors'].append(COLORS[hotspot['average_letter']])\n return r\n\n\n@to_json\n@get_proj\ndef feed(p):\n data = []\n for report, events in Project(p).get_events():\n tmp = {}\n tmp['add'] = []\n tmp['change'] = []\n for event in report.get_new():\n event = Event(event)\n block = event.get_block()\n tmp['add'].append({\n 'letter': event.e['letter'],\n 'url': event.github_link(block=block),\n 'complete_name': truncatechars(Block(block).complete_name(), 80)\n })\n for event in events:\n event = Event(event)\n block = event.get_block()\n tmp['change'].append({\n 'previous_letter': event.e['previous_letter'],\n 'letter': event.e['letter'],\n 'url': event.github_link(block=block),\n 'complete_name': truncatechars(Block(block).complete_name(), 80),\n 'rate': event.rate(),\n })\n tmp['message'] = report.r['message']\n tmp['url'] = report.r['url']\n tmp['hash'] = truncatechars(report.r['hash'], 10)\n tmp['created_at'] = timesince_filter(report.r['created_at'])\n data.append(tmp)\n return data\n\n\n@to_json\n@get_proj\ndef trends(p):\n blocks = []\n gpas = []\n for report in db.reports.find({'username': p['username'],\n 'repo': p['repo']}).sort('created_at', 1):\n timestamp = time.mktime(report['created_at'].timetuple())\n blocks.append([timestamp, sum(report['letters'].values())])\n gpas.append([timestamp, report['gpa']])\n return [{'key': 'Blocks analyzed', 'bar': True, 'values': blocks},\n {'key': 'GPA', 'values': gpas}]\n\n\n@to_json\n@get_proj\ndef gpa(p):\n r = {'labels': [], 'datasets': [{'data': []}]}\n ds = r['datasets'][0]\n ds['fillColor'] = \"rgba(92, 205, 201, 0.4)\"\n ds['strokeColor'] = \"#009b95\"\n ds['pointColor'] = \"#ff7100\"\n ds['pointStrokeColor'] = \"#fff\"\n for report in db.reports.find({'username': p['username'],\n 'repo': p['repo']}).sort('created_at', 1):\n if 'gpa' not in report:\n continue\n ds['data'].append(report['gpa'])\n r['labels'].append(time.strftime('%d %b %Y',\n report['created_at'].timetuple()))\n return r\n\n\n@to_json\n@get_proj\ndef blocks_breakdown(p):\n r = {'labels': [], 'datasets': []}\n blocks = collections.defaultdict(list)\n for report in db.reports.find({'username': p['username'],\n 'repo': p['repo']}).sort('created_at', 1):\n if 'letters' not in report:\n continue\n r['labels'].append(time.strftime('%d %b %Y',\n report['created_at'].timetuple()))\n for letter in 'ABCDEF':\n blocks[letter].append(report['letters'].get(letter) or 0)\n for letter, data in blocks.iteritems():\n if not any(data):\n continue\n ds = {'data': data}\n ds['fillColor'] = COLORS[letter]\n r['datasets'].append(ds)\n return r\n","sub_path":"barium/apps/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"126661060","text":"#!/usr/bin/env python3\nimport docker\nfrom dateutil.parser import parse\nconn = docker.from_env(version='auto')\n\n\ndef show_attrs(image):\n print(\"Image ID: \"+image.attrs['Id'].split(':')[1][0:12])\n # print(\"From: \"+str(image.attrs['RepoTags']))\n date = parse(image.attrs['Created'])\n print(\"Created:\", date)\n print()\n\n\nfor image in conn.images.list():\n show_attrs(image)\n","sub_path":"list_docker_images_py3.py","file_name":"list_docker_images_py3.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"563667699","text":"#!/usr/bin/env 
python\n\n\na = []\nk = []\ns = raw_input()\nwhile s != \"end\":\n a.append(int(s))\n s = raw_input()\nb = input()\ni = 0\nwhile i < len(a):\n if a[i] <= b:\n k.append(a[i])\n elif a[i] > b:\n k.append(b)\n k.append(a[i])\n i = i + 1\n \n","sub_path":"ca116/position-of-smallest.py","file_name":"position-of-smallest.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"134222989","text":"from django.shortcuts import render\nfrom rest_framework.decorators import api_view\nfrom django.http import HttpResponse\nfrom rest_framework.response import Response\nfrom django.http import JsonResponse\nimport os\nimport sys\n\nfrom .apps import *\n\n#imports from project\nfolder_path=os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.join(folder_path, '..','..','..' ))\nfrom policy.policyparser import *\nfrom devices.devices_manager import DevicesManager\nfrom devices.devices_status_db import DevicesStatusDB\nfrom fifo_manager import FIFOManager\n\npolicy_path=os.path.join(folder_path,'policies','current_policy.json')\n\n\n\n\n\n@api_view(['GET', 'POST' ])\ndef get_policy_list(request):\n \"\"\"\n Obtains the list of policies from the current policy file\n \n Arguments:\n request -- Empty request\n Returns:\n response -- List of policies \n \"\"\"\n pp = PolicyParser(policy_path, None)\n pp.initialize()\n return Response(str(pp))\n\n\n@api_view(['GET', 'POST' ])\ndef get_policy_file(request):\n \"\"\"\n Obtains the policy file\n \n Arguments:\n request -- Empty request\n Returns:\n response -- The content of the policy file\n \"\"\"\n\n file_path = policy_path\n with open(file_path, 'rb') as f:\n response = HttpResponse(f.read(), content_type=\"text/plain\")\n response['Content-Disposition'] = 'inline; filename='+os.path\\\n .basename(file_path)\n return response\n\n# Create your views here.\n@api_view(['GET', 'POST' ])\ndef get_policy_count(request):\n \"\"\"\n Counts the policies in the policy file\n \n Arguments:\n request -- Empty request\n Returns:\n response -- The number of policies in the policy file\n \"\"\"\n pp = PolicyParser(policy_path, None)\n pp.initialize()\n return Response(len(pp.policies))\n\n\n@api_view(['GET', 'POST' ])\ndef set_policy_file(request):\n \"\"\"\n Persists a policy file at the path established by policy_path\n \n Arguments:\n request -- a request containing a policy_file POST variable\n Returns:\n response -- a success message once the file is stored and the engine is notified\n \"\"\"\n\n file_received=request.FILES['policy_file']\n fr = file_received.read()\n file = open(policy_path, 'wb')\n file.write(fr)\n file.close()\n\n fm = FIFOManager('D2E', 'w')\n fm.write('{\"task\":\"upload_policy\"}', 5)\n\n return Response(\"Success\")\n\n
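# NOTE: the upload_policy task written above is picked up by the policy engine on\n# the other end of the D2E pipe; the engine is assumed to reload current_policy.json.\n\n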
@api_view(['GET', 'POST' ])\ndef get_all_devices(request):\n \"\"\"\n Obtains a list of all devices\n \n Arguments:\n request -- An empty request\n Returns:\n response -- HttpResponse containing the list of devices in JSON format\n \"\"\"\n \n devmgr = RestConfig.devmgr\n devs=devmgr.get_all_devices()\n resp={}\n\n for dev in devs:\n status={}\n for status_name in dev.list_status():\n status[status_name]=dev.get_status_value(status_name)\n resp[dev.get_device_name()]=status\n return Response(resp)\n\n@api_view(['GET', 'POST' ])\ndef get_active_policies(request):\n \"\"\"\n Obtains the list of active/inactive policies from the FIFO exposed by the policy engine\n \n Arguments:\n request -- An empty request\n Returns:\n response -- HttpResponse containing the list of active/inactive policies\n in JSON format\n \"\"\"\n \n folder_path=os.path.dirname(os.path.abspath(__file__))\n fm = FIFOManager(os.path.join(folder_path,'..','..','..','D2E'), 'w')\n fm.write('{\"task\":\"query\"}', 5)\n fm1 = FIFOManager(os.path.join(folder_path,'..','..','..','E2D'), 'r')\n\n return Response(fm1.read())\n\n@api_view(['GET', 'POST' ])\ndef set_state(request):\n \"\"\"\n Sets a test value on the shared RestConfig application state\n \n Arguments:\n request -- An empty request\n Returns:\n response -- HttpResponse confirming the update\n \"\"\"\n \n RestConfig.myvar=\"it was set\"\n\n return Response(\"worked\")\n\n\n\n@api_view(['GET', 'POST' ])\ndef get_state(request):\n \"\"\"\n Reads the test value back from the shared RestConfig application state\n \n Arguments:\n request -- An empty request\n Returns:\n response -- HttpResponse containing the stored value\n \"\"\"\n\n return Response(RestConfig.myvar)","sub_path":"backend/backend/rest/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"134691648","text":"\"\"\" Functions to help with testing Bokeh and reporting issues.\n\n\"\"\"\nfrom __future__ import absolute_import\n\ndef skipIfPy3(message):\n \"\"\" unittest decorator to skip a test for Python 3\n\n \"\"\"\n from unittest import skipIf\n from .platform import is_py3\n return skipIf(is_py3(), message)\n\n\ndef skipIfPyPy(message):\n \"\"\" unittest decorator to skip a test for PyPy\n\n \"\"\"\n from unittest import skipIf\n from .platform import is_pypy\n return skipIf(is_pypy(), message)\n\ndef print_versions():\n \"\"\" Print the versions for Bokeh and the current Python and OS.\n\n Returns:\n None\n\n \"\"\"\n import platform as pt\n from .. import __version__\n message = \"\"\"\n Bokeh version: %s\n Python version: %s-%s\n Platform: %s\n \"\"\" % (__version__, pt.python_version(),\n pt.python_implementation(), pt.platform())\n print(message)\n\ndef runtests(verbosity=1, xunitfile=None, exit=False):\n \"\"\" Run the full Bokeh test suite, and output the results of the tests\n to sys.stdout.\n\n This function uses nosetests to discover which tests to run, and will\n run tests in any 'tests' subdirectory within the Bokeh module.\n\n Args:\n verbosity (int, optional) :\n Acceptable values are 0 (less verbose) to 2 (most verbose)\n\n xunitfile (str, optional) :\n Write xunit-style XML test results to a given filename. This\n is useful for running tests on a CI server. (default: None)\n\n exit (bool, optional) :\n Whether to return or exit. If True, call sys.exit with an\n error code after the tests are finished. 
(default: False)\n\n Returns:\n int : Nose return code\n\n \"\"\"\n\n import nose, os, sys\n\n argv = ['nosetests', '--verbosity=%d' % verbosity]\n\n # Output an xunit file if requested\n if xunitfile:\n argv.extend(['--with-xunit', '--xunit-file=%s' % xunitfile])\n\n # Set the logging level to warn\n argv.extend(['--logging-level=WARN'])\n\n # Add all 'tests' subdirectories to the options\n rootdir = os.path.join(os.path.dirname(__file__), \"..\")\n for root, dirs, files in os.walk(rootdir):\n if 'tests' in dirs:\n testsdir = os.path.join(root, 'tests')\n argv.append(testsdir)\n print('Test dir: %s' % testsdir[len(rootdir)+1:])\n\n # print versions (handy when reporting problems)\n print_versions()\n sys.stdout.flush()\n\n # Run the tests\n return nose.main(argv=argv, exit=exit)\n","sub_path":"bokeh/util/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"60718921","text":"import sys\r\n\r\ndef runTests():\r\n\tpass\r\n\r\ndef method1(n, in_arr):\r\n\r\n\ttotal1 = 0\r\n\ti0 = int(in_arr[0])\r\n\tlast = i0\r\n\r\n\tfor i in xrange(1,n):\r\n\t\tcurrent = int(in_arr[i])\r\n\t\tif(current < last):\r\n\t\t\tdiff = last - current\r\n\t\t\ttotal1 += diff\r\n\t\t\t\r\n\t\tlast = current\r\n\treturn total1\r\n\r\ndef method2(n, in_arr):\r\n\ti0 = float(in_arr[0])\r\n\tlast = i0\r\n\trate = -1.0\r\n\r\n\tfor i in xrange(1,n):\r\n\t\tcurrent = float(in_arr[i])\r\n\t\tif(current <= last):\r\n\t\t\trate_maybe = float((last - current)/10.0)\r\n\t\t\tif (rate_maybe > rate):\r\n\t\t\t\trate = rate_maybe\r\n\r\n\t\tlast = current\r\n\r\n\ttotal1 = 0\r\n\ti0 = float(in_arr[0])\r\n\tlast = i0\r\n\trate_per_10 = rate * 10\r\n\r\n\tfor i in xrange(1,n):\r\n\t\tcurrent = int(in_arr[i])\r\n\t\tif(last > rate_per_10):\r\n\t\t\ttotal1 += rate_per_10\r\n\r\n\t\telse:\r\n\t\t\ttotal1 += last\r\n\r\n\t\tlast = current\r\n\t\t\t\r\n\r\n\t\r\n\treturn total1\r\n\r\nif __name__ == '__main__':\r\n\r\n\ttesting = False\r\n\t\r\n\tif(len(sys.argv) < 2):\r\n\t\ttesting = True\r\n\t\trunTests()\r\n\t\tf = open('testa', 'r')\r\n\telse:\r\n\t\tf = open(sys.argv[1], 'r')\r\n\r\n\ttestCases = int(f.next())\r\n\t\r\n\tfor caseNum in xrange(1, testCases + 1):\r\n\t\r\n\t\tN = int(f.next())\r\n\r\n\t\tinput = f.next().split()\r\n\r\n\r\n\t\tnum1 = method1(N, input)\r\n\t\tnum2 = method2(N, input)\r\n\r\n\r\n\r\n\r\n\t\tprint('Case #%d: %d %d' % (caseNum, num1, num2))\r\n","sub_path":"solutions_6404600001200128_1/Python/openforce/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"401795663","text":"# coding: utf-8\n# __author__: u\"John\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\nfrom mplib.dnr.factory import DataDenoiser\nfrom mplib.common import smart_decode\nimport traceback\nimport sys\n\nreload(sys)\nsys.setdefaultencoding(\"utf8\")\n\n\nkeywords = [\n '通知分享',\n '通知通知',\n '通知出停诊',\n '停诊通知',\n '填表',\n '预约转诊服务',\n]\n\n\ndef swap(lst):\n return [lst[1], lst[0]]\n\n\ntry:\n data = [line for line in sys.stdin]\n data = list(map(lambda x: swap(smart_decode(x).replace(\"\\n\", \"\").replace(\"\\r\", \"\").replace(\"\\\\N\", \"\").split(\"\\t\")), data))\n dd = DataDenoiser(data=data, content_index=1, head=[\"id\", \"content\"])\n dd.use_keywords = True\n dd.noise_keywords_list = keywords\n dd.udf_support = True\n dd.run()\nexcept Exception as 
e:\n print(\"\\t\".join([\"ERROR\", traceback.format_exc().replace(\"\\t\", \" \").replace(\"\\n\", \" \")]))\n","sub_path":"build/lib/mplib/dnr/udf/udf_denoise_inquiry.py","file_name":"udf_denoise_inquiry.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"240362391","text":"from scipy.stats import randint as sp_randint\r\n\r\n###########################################################\r\n#In this file, there are only the constant variables#######\r\n###########that may be used only once or more##############\r\n###########################################################\r\n\r\nos = \"unix\"\r\n\r\nWINDOWS_LOG_PATH = \"/Users/emrecalisir/git/brexit/CSSforPolitics/logs/predictor.log\"\r\nUNIX_LOG_PATH = \"/Users/emrecalisir/git/brexit/CSSforPolitics/logs/predictor.log\"\r\nUNIX_DATA_PATH = \"/home/ubuntu/users/emre/CSSforPolitics/user_stance/\"\r\n\r\n#RUN_MODE could be TRAIN, TEST, PREDICT_UNLABELED_DATA\r\nRUN_MODE = \"PREDICT_UNLABELED_DATA\"\r\nTRAIN_TYPE = \"K-FOLD\"\r\n\r\nFILE_STORE_MODEL = \"/Users/emrecalisir/git/brexit/CSSforPolitics/models/StanceClassifier_3Labels_5Aug.mdl\"\r\nORIGINAL_TEXT_COLUMN = \"tweet_text\"\r\nPROCESSED_TEXT_COLUMN = \"processed_text\"\r\n\r\n\r\nTRAIN_FILE_COLUMNS_MLRB = [\"text\",\"r1\"]\r\nTRAIN_FILE_COLUMNS_MLMA = [\"ID\", \"nbr_retweet\", \"nbr_favorite\", \"nbr_reply\", \"datetime\", \"text\", \"tw_lang\", \"new_p1\",\r\n \"user_favourites_count\", \"user_followers_count\", \"user_friends_count\", \"user_statuses_count\",\r\n \"api_res\",\"r1\"]\r\n\r\nPRED_FILE_COLUMNS = ['ID','user_id','datetime','user_screen_name','tw_coordinates','tw_retweet_count','tw_favorite_count','user_created_at','tw_hashtags','user_description','user_favourites_count','user_followers_count','user_friends_count','user_geo_enabled','user_lang','user_listed_count','user_location','user_statuses_count','user_verified','tw_full']\r\n\r\nPRED_NEW_COLS = ['usernameTweet','ID','text','tw_lang','url','nbr_retweet','nbr_favorite','nbr_reply','datetime','is_reply','is_retweet','user_id','tw_coordinates','tw_favorite_count','tw_favorited','tw_geo','tw_hashtags','tw_loc_country','tw_loc_fullname','tw_loc_name','tw_loc_type','tw_retweet_count','tw_source','user_created_at','user_default_profile','user_description','user_favourites_count','user_followers_count','user_friends_count','user_geo_enabled','user_lang','user_listed_count','user_location','user_name','user_profile_image_url','user_screen_name','user_statuses_count','user_timezone','user_url','user_utc_offset','user_verified','t_age','t_eth','t_gender','tw_full','sentiment']\r\n\r\nTRAIN_FILE_COLUMNS = [\"ID\", \"user_id\", \"datetime\", \"text\"]\r\n\r\nSTANCE_FILE_COLUMNS = [\"ID\", \"user_id\", \"datetime\", \"text\", \"r1\",\"sentiment\"]\r\n\r\nDISCOVER_FILE_COLUMNS = [\"ID\", \"user_id\", \"datetime\", \"text\"]\r\n\r\n\r\nDATAFRAME_COLUMNS_INT = ['nbr_retweet', 'user_followers_count', 'user_friends_count', 'user_favourites_count', 'new_p1',\r\n 'hashtag_count', 'mention_count', 'contains_link']\r\n\r\nDATAFRAME_COLUMNS = ['tw_full', 'nbr_retweet', 'user_followers_count', 'user_friends_count', 'user_favourites_count', 'new_p1',\r\n 'hashtag_count', 'mention_count', 'contains_link']\r\n\r\nTARGET_COLUMN = 'r1'\r\n\r\nINPUT_FILE_NAME_RB = \"F:/tmp/full_en3.csv_out.csv\"\r\nINPUT_FILE_NAME_TRAIN_MLRB = \"F:/tmp/random_stance_1_2_sample10K.csv\"\r\n#INPUT_FILE_NAME_TRAIN_MLMA = 
\"/Users/emrecalisir/git/brexit/CSSforPolitics/user_stance/train-remain-1470-rest-1470.txt\"\r\nINPUT_FILE_NAME_TRAIN_MLMA = \"/Users/emrecalisir/git/brexit/CSSforPolitics/user_stance/train-remain1470-rest1470.txt\"\r\n\r\nINPUT_FILE_PRED = \"/Users/emrecalisir/git/cortico/discovery/tws_6sep_2018_01.out_sent.csv_en\"\r\n#INPUT_FILE_PRED = \"/Users/emrecalisir/git/cortico/discovery/dataset_paper/data/sentiment/tws_6sep_2019_03_2.out_sent.csv\"\r\n\r\nINPUT_FILE_NAME_DISCOVER_PREDICT_NEUTRALS = WINDOWS_LOG_PATH + \"full_en_30_10_2018.csv_neutrals_out.csv\"\r\n\r\nINPUT_FILE_NAME_TEST = \"F:/tmp/remain-leave-train-650.txt\"\r\n\r\nTWITTER_APP_AUTH = {\r\n 'consumer_key': 'your_data',\r\n 'consumer_secret': 'your_data',\r\n 'access_token': 'your_data',\r\n 'access_token_secret': 'your_data',\r\n}\r\n\r\nMASHAPE_KEY = \"your_data\"\r\n\r\nGRID_SEARCH_PARAMS_RANDOM_FOREST = {\r\n 'vect__max_df': (0.5, 0.75, 1.0),\r\n #'vect__max_features': (None, 5000, 10000, 50000),\r\n 'vect__ngram_range': ((1, 1), (1, 2), (1,3), (1,4), (2,2), (2,3), (2,4), (3,3), (3,4), (4,4)), # unigrams or bigrams\r\n \"clf__max_depth\": [3, None],\r\n \"clf__max_features\": sp_randint(1, 11),\r\n \"clf__min_samples_split\": sp_randint(2, 11),\r\n \"clf__min_samples_leaf\": sp_randint(1, 11),\r\n \"clf__bootstrap\": [True, False],\r\n \"clf__criterion\": [\"gini\", \"entropy\"],\r\n\r\n }\r\n\r\nGRID_SEARCH_PARAMS_SGD = {\r\n 'vect__max_df': (0.5, 0.75, 1.0),\r\n 'vect__max_features': (None, 5000, 10000, 50000),\r\n 'vect__ngram_range': ((1, 1), (1, 2), (1,3), (1,4), (2,2), (2,3), (2,4), (3,3), (3,4), (4,4)), # unigrams or bigrams\r\n 'tfidf__use_idf': (True, False),\r\n 'tfidf__norm': ('l1', 'l2'),\r\n 'clf__alpha': (0.00001, 0.000001),\r\n 'clf__penalty': ('l2', 'elasticnet'),\r\n 'clf__n_iter': (10, 50, 80),\r\n}\r\n\r\nSGD_BEST_PARAMS = {\r\n 'alpha' : 0.00001,\r\n 'n_iter' : 10,\r\n 'penalty': 'l2',\r\n}\r\n\r\nNGRAM_BEST_PARAMS = {\r\n 'vect__max_df': 0.75,\r\n 'vect__max_features': 10000,\r\n 'vect__ngram_range': (1, 2),\r\n 'tfidf__use_idf': False,\r\n 'tfidf__norm': 'l2',\r\n}\r\n\r\nGRID_SEARCH_PARAMS_SVM = {\r\n 'vect__max_df': (0.5, 0.75, 1.0),\r\n 'vect__max_features': (None, 5000, 10000, 50000),\r\n 'vect__ngram_range': ((1, 1), (1, 2), (1, 3), (1, 4), (2, 2), (2, 3), (2, 4), (3, 3), (3, 4), (4, 4)),\r\n 'vect__analyzer': ('char', 'word'),\r\n 'clf__kernel':('rbf', 'linear'),\r\n 'clf__C':(1, 10, 100, 1000),\r\n 'tfidf__use_idf': (True, False),\r\n 'tfidf__norm': ('l1', 'l2'),\r\n}\r\n\r\nHASHTAG_REMAIN = [\"strongerin\", \"voteremain\", \"intogether\", \"labourinforbritain\", \"moreincommon\", \"greenerin\", \"catsagainstbrexit\", \"bremain\", \"betteroffin\", \"leadnotleave\", \"remain\", \"stay\", \"ukineu\", \"votein\", \"voteyes\", \"yes2eu\", \"yestoeu\", \"sayyes2europe\", \"fbpe\",\"stopbrexit\", \"stopbrexitsavebritain\",\"brexitshambles\"]\r\nHASHTAG_LEAVE = [\"leaveeuofficial\", \"leaveeu\", \"leave\", \"labourleave\", \"votetoleave\", \"voteleave\", \"takebackcontrol\", \"ivotedleave\", \"beleave\", \"betteroffout\", \"britainout\", \"nottip\", \"takecontrol\", \"voteno\", \"voteout\", \"voteleaveeu\", \"leavers\", \"vote_leave\", \"leavetheeu\", \"voteleave\", \"takecontrol\", \"votedleave\"]\r\nHASHTAG_NEUTRAL = [\"euref\", \"eureferendum\", \"eu\", \"uk\"]\r\n\r\n\r\nINPUT_TWEET_IDS_FILE_NAME = \"tweet_ids.csv\"\r\n\r\nINPUT_FILE_FULL_FEATURES = \"/home/ubuntu/users/emre/CSSforPolitics/user_stance/full_en_30_10_2018.csv\"\r\n\r\nMAX_PROB = 0.9\r\n\r\nMIN_PROB = 0.1\r\n\r\nELIMINATE_LOW_PROB = 
True\r\n\r\np2_times = ['2016-01', '2016-02', '2016-03', '2016-04', '2016-05', '2016-06-0', '2016-06-1', '2016-06-20', '2016-06-21']\r\np3_times = ['2016-06-22', '2016-06-23', '2016-06-24']\r\np4_times = ['2016-06-25', '2016-06-27', '2016-06-28', '2016-06-29', '2016-06-3','2016-07','2016-08','2016-09','2016-10','2016-11','2016-12' ]\r\np5_times = ['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06']\r\np6_times = ['2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12']\r\np7_times = ['2018-01', '2018-02', '2018-03', '2018-04', '2018-05', '2018-06']\r\np8_times = ['2018-07', '2018-08', '2018-09', '2018-10', '2018-11', '2018-12']\r\n\r\n#CLASSIFIER_REMAIN = \"/Users/emrecalisir/git/brexit/CSSforPolitics/models/RemainClassifier_2019-08-11.mdl\"\r\n#CLASSIFIER_LEAVE = \"/Users/emrecalisir/git/brexit/CSSforPolitics/models/LeaveClassifier_2019-08-11.mdl\"\r\n\r\nCLASSIFIER_REMAIN = \"/Users/emrecalisir/git/brexit/CSSforPolitics2/models/RemainClassifier_2019-08-11.mdl\"\r\nCLASSIFIER_LEAVE = \"/Users/emrecalisir/git/brexit/CSSforPolitics2/models/LeaveClassifier_2019-08-11.mdl\"","sub_path":"util_tools/globals.py","file_name":"globals.py","file_ext":"py","file_size_in_byte":7485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"416865455","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Feb 5 17:23:15 2019\r\n\r\n@author: hjiang\r\n\"\"\"\r\n\r\n\"\"\"\r\nAccording to the Wikipedia's article: \"The Game of Life, also known simply as Life, \r\nis a cellular automaton devised by the British mathematician John Horton Conway in 1970.\"\r\n\r\nGiven a board with m by n cells, each cell has an initial state live (1) or dead (0). \r\nEach cell interacts with its eight neighbors (horizontal, vertical, diagonal) \r\nusing the following four rules (taken from the above Wikipedia article):\r\n\r\nAny live cell with fewer than two live neighbors dies, as if caused by under-population.\r\nAny live cell with two or three live neighbors lives on to the next generation.\r\nAny live cell with more than three live neighbors dies, as if by over-population..\r\nAny dead cell with exactly three live neighbors becomes a live cell, as if by reproduction.\r\nWrite a function to compute the next state (after one update) of the board given its current state. \r\nThe next state is created by applying the above rules simultaneously to every cell in the current state, \r\nwhere births and deaths occur simultaneously.\r\n\r\nExample:\r\n\r\nInput: \r\n[\r\n [0,1,0],\r\n [0,0,1],\r\n [1,1,1],\r\n [0,0,0]\r\n]\r\nOutput: \r\n[\r\n [0,0,0],\r\n [1,0,1],\r\n [0,1,1],\r\n [0,1,0]\r\n]\r\nFollow up:\r\n\r\nCould you solve it in-place? Remember that the board needs to be updated at the same time:\r\nYou cannot update some cells first and then use their updated values to update other cells.\r\nIn this question, we represent the board using a 2D array. In principle, the board is infinite, \r\nwhich would cause problems when the active area encroaches the border of the array. 
How would you address these problems?\r\nIf a live cell has fewer than 2 live neighbors, it dies\r\nIf a live cell has 2 or 3 live neighbors, it lives on into the next iteration\r\nIf a live cell has more than 3 live neighbors, it dies\r\nIf a dead cell has exactly 3 live neighbors, it becomes live\r\n\"\"\"\r\n\r\n# Time: O(m * n)\r\n# Space: O(1)\r\n\r\nclass Solution(object):# just read this one, Vinci\r\n def gameOfLife(self, board):\r\n \"\"\"\r\n :type board: List[List[int]]\r\n :rtype: void Do not return anything, modify board in-place instead.\r\n \"\"\"\r\n m = len(board)\r\n n = len(board[0]) if m else 0\r\n for i in range(m):\r\n for j in range(n):\r\n count = 0\r\n ## Count live cells in 3x3 block.\r\n for I in range(max(i-1, 0), min(i+2, m)):# at most three rows, clamped at the border\r\n for J in range(max(j-1, 0), min(j+2, n)):# likewise at most three columns, clamped at the border\r\n count += board[I][J] & 1\r\n\r\n # if (count == 4 && board[i][j]) means:\r\n # Any live cell with three live neighbors lives.\r\n # if (count == 3) means:\r\n # Any live cell with two live neighbors.\r\n # Any dead cell with exactly three live neighbors lives.\r\n if (count == 4 and board[i][j]) or count == 3:# cases where the cell ends up live\r\n board[i][j] |= 2 # Mark as live, i.e. add 2.\r\n\r\n for i in range(m):\r\n for j in range(n):\r\n board[i][j] >>= 1 # Update to the next state.# every other case is dead; the trick is that the +2 above becomes 1 after the shift -- remarkably clever\r\n \r\n \r\n \r\n#https://blog.csdn.net/fuxuemingzhu/article/details/82809923\r\n# The approach is simple brute force: for each position count the live cells among its 8 neighbors.\r\n# My method copies the board first, so the original board is used for counting while the updates go\r\n# into the new board_next without affecting it; the values are copied back at the end.\r\n#\r\n# Time complexity is O(MN), space complexity is O(MN).\r\nimport copy\r\nclass Solution1(object):\r\n def gameOfLife(self, board):\r\n \"\"\"\r\n :type board: List[List[int]]\r\n :rtype: void Do not return anything, modify board in-place instead.\r\n \"\"\"\r\n if board and board[0]:\r\n M, N = len(board), len(board[0])\r\n board_next = copy.deepcopy(board)\r\n for m in range(M):\r\n for n in range(N):\r\n lod = self.liveOrDead(board, m, n)\r\n if lod == 2:\r\n board_next[m][n] = 0\r\n elif lod == 1:\r\n board_next[m][n] = 1\r\n for m in range(M):\r\n for n in range(N):\r\n board[m][n] = board_next[m][n]\r\n \r\n def liveOrDead(self, board, i, j):# return 0-nothing,1-live,2-dead\r\n ds = [(1, 1), (1, -1), (1, 0), (-1, 1), (-1, 0), (-1, -1), (0, 1), (0, -1)]\r\n live_count = 0\r\n M, N = len(board), len(board[0])\r\n for d in ds:\r\n r, c = i + d[0], j + d[1]\r\n if 0 <= r < M and 0 <= c < N:\r\n if board[r][c] == 1:\r\n live_count += 1\r\n if live_count < 2 or live_count > 3:\r\n return 2\r\n elif board[i][j] == 1 or (live_count == 3 and board[i][j] ==0):\r\n return 1\r\n else:\r\n return 0\r\n \r\n ","sub_path":"Python3.6/289-Py3-Game of Life.py","file_name":"289-Py3-Game of Life.py","file_ext":"py","file_size_in_byte":5487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"45490640","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Arm Compute Library integration conv2d tests.\"\"\"\n\nimport numpy as np\n\nimport tvm\nfrom tvm import relay\n\nfrom .infrastructure import skip_runtime_test, skip_codegen_test, build_and_run, \\\n verify, verify_codegen\nfrom .infrastructure import Device\n\n\ndef _get_model(shape, kernel_size, padding, strides,\n dilation, groups, dtype, channels,\n var_names, has_bias=False, has_activation=False, has_pad=False):\n \"\"\"Return a model and any parameters it may have\"\"\"\n a = relay.var(next(var_names), shape=shape, dtype=dtype)\n if has_pad:\n p = ((0, 0), (padding[0], padding[0]), (padding[1], padding[1]), (0, 0))\n a = relay.nn.pad(a, pad_width=p)\n padding = (0, 0, 0, 0)\n else:\n if len(padding) == 2:\n padding = (padding[0], padding[1], padding[0], padding[1])\n shape = (shape[0], shape[1] + padding[0] * 2,\n shape[2] + padding[1] * 2, shape[3])\n weight_shape = (kernel_size, kernel_size, shape[3] // groups, channels)\n w = tvm.nd.array(np.random.uniform(-128, 127, weight_shape).astype(dtype))\n weights = relay.const(w, dtype)\n out = relay.nn.conv2d(\n a,\n weights,\n kernel_size=(kernel_size, kernel_size),\n data_layout=\"NHWC\",\n kernel_layout=\"HWIO\",\n dilation=(1, 1),\n strides=strides,\n padding=padding,\n groups=groups,\n channels=channels\n )\n params = {\"w\": w}\n if has_bias:\n b = tvm.nd.array(np.random.uniform(-128, 127, weight_shape[3]).astype(dtype))\n biasc = relay.const(b, dtype)\n out = relay.nn.bias_add(out, biasc, axis=3)\n params[\"b\"] = b\n if has_activation:\n out = relay.nn.relu(out)\n return out, params\n\n\ndef _get_expected_codegen(shape, kernel_size, padding, strides,\n dilation, groups, dtype, channels,\n has_bias=False, has_activation=False):\n if len(padding) == 2:\n padding = (padding[0], padding[1], padding[0], padding[1])\n weight_shape = (channels, kernel_size, kernel_size, shape[3] // groups)\n output_height = ((shape[1] - kernel_size + padding[0] + padding[2]) / strides[0]) + 1\n output_width = ((shape[2] - kernel_size + padding[1] + padding[3]) / strides[1]) + 1\n output_shape = (1, int(output_height), int(output_width), channels)\n\n node = {\n \"op\": \"kernel\",\n \"name\": \"nn.conv2d\",\n \"inputs\": [[0, 0, 0], [1, 0, 0]],\n \"attrs\": {\n \"groups\": [[\"1\"]],\n \"num_inputs\": str(3 if has_bias else 2),\n \"num_outputs\": \"1\",\n \"data_layout\": [[\"NHWC\"]],\n \"kernel_layout\": [[\"OHWI\"]],\n \"channels\": [[\"1\"]],\n \"dilation\": [[\"1\", \"1\"]],\n \"out_layout\": [[\"\"]],\n \"out_dtype\": [[\"\"]],\n \"kernel_size\": [[str(kernel_size), str(kernel_size)]],\n \"shape\": [[list(output_shape)]],\n \"dtype\": [[dtype]],\n \"padding\": [[str(p) for p in padding]],\n \"strides\": [[str(s) for s in strides]]\n },\n }\n\n if has_activation:\n node[\"attrs\"][\"activation_type\"] = [[\"relu\"]]\n\n input = {\n \"op\": \"input\",\n \"name\": \"\",\n \"attrs\": {\"shape\": [[list(shape)]], \"dtype\": [[\"float32\"]]}}\n kernel = {\n \"op\": \"const\",\n \"name\": \"\",\n \"attrs\": {\"shape\": [[list(weight_shape)]], \"dtype\": [[\"float32\"]]}}\n\n if has_bias:\n bias = {\n \"op\": \"const\",\n \"name\": \"\",\n \"attrs\": {\"shape\": [[[weight_shape[0]]]], \"dtype\": [[\"float32\"]]}}\n node[\"inputs\"].append([2, 0, 0])\n return [input, kernel, bias, node]\n else:\n return [input, kernel, node]\n\n\ndef test_conv2d():\n if skip_runtime_test():\n return\n\n device = Device()\n np.random.seed(0)\n\n shape = (1, 14, 
14, 32)\n dtype = \"float32\"\n\n inputs = {\n \"a\": tvm.nd.array(np.random.uniform(-128, 127, shape).astype(dtype)),\n }\n\n for kernel_size in [1, 2, 3]:\n outputs = []\n func, params = _get_model(shape, kernel_size,\n (0, 0), (1, 1), 1, 1,\n dtype, 1, iter(inputs))\n for acl in [False, True]:\n outputs.append(build_and_run(func, inputs, 1,\n params, device,\n enable_acl=acl)[0])\n verify(outputs, atol=0.002, rtol=0.01)\n\n for pad_ksize in [((1, 1), 3), ((2, 2), 5), ((2, 1), 3)]:\n outputs = []\n func, params = _get_model(shape, pad_ksize[1], pad_ksize[0],\n (1, 1), 1, 1, dtype, 1, iter(inputs))\n for acl in [False, True]:\n outputs.append(build_and_run(func, inputs, 1,\n params, device,\n enable_acl=acl)[0])\n verify(outputs, atol=0.002, rtol=0.01)\n\n for strides in [(1, 1), (2, 2)]:\n outputs = []\n func, params = _get_model(shape, 2, (0, 0), strides,\n 1, 1, dtype, 1, iter(inputs))\n for acl in [False, True]:\n outputs.append(build_and_run(func, inputs, 1,\n params, device,\n enable_acl=acl)[0])\n verify(outputs, atol=0.002, rtol=0.01)\n\n # Test composite convolution: (has_pad, has_bias, has_activation).\n for composite in [(False, True, False), (False, False, True), (False, True, True),\n (True, False, False)]:\n outputs = []\n func, params = _get_model(shape, 2, (1, 1), (1, 1),\n 1, 1, dtype, 1, iter(inputs),\n has_pad=composite[0],\n has_bias=composite[1],\n has_activation=composite[2])\n for acl in [False, True]:\n outputs.append(build_and_run(func, inputs, 1,\n params, device,\n enable_acl=acl)[0])\n verify(outputs, atol=0.002, rtol=0.01)\n\n\ndef test_codegen_conv2d():\n if skip_codegen_test():\n return\n\n shape = (1, 25, 25, 1)\n dtype = \"float32\"\n inputs = {\"a\"}\n\n for pad_ksize in [((1, 1), 3), ((2, 1), 3)]:\n args = (shape, pad_ksize[1], pad_ksize[0], (1, 1), 1, 1, dtype, 1)\n func, params = _get_model(*args, var_names=iter(inputs))\n exp_codegen = _get_expected_codegen(*args)\n verify_codegen(func, exp_codegen, 1)\n # Test composite convolution: (has_pad, has_bias, has_activation).\n for composite in [(False, True, False), (False, False, True), (False, True, True),\n (True, False, False)]:\n args = (shape, 2, (1, 1), (1, 1), 1, 1, dtype, 1)\n func, params = _get_model(*args, var_names=iter(inputs),\n has_pad=composite[0],\n has_bias=composite[1],\n has_activation=composite[2])\n exp_codegen = _get_expected_codegen(*args,\n has_bias=composite[1],\n has_activation=composite[2])\n verify_codegen(func, exp_codegen, 1)\n\n\nif __name__ == \"__main__\":\n test_conv2d()\n test_codegen_conv2d()\n","sub_path":"tests/python/contrib/test_arm_compute_lib/test_conv2d.py","file_name":"test_conv2d.py","file_ext":"py","file_size_in_byte":8264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"327679155","text":"\"\"\"\nThe following gates are constructed using circuit identities from\nhttps://arxiv.org/pdf/quant-ph/9503016.pdf\n\"\"\"\nimport numpy as np\n\ndef add_ansatz_gzb(c, nr_q, nr_p, initialize=True):\n \"\"\"\n c: quantum circuit\n nr_q: number of qubits\n nr_p: number of qubits initialized in state 1\n initialize\n \"\"\"\n\n if initialize == True:\n for i in range(nr_p):\n c.add_gate(\"NOT\", i)\n\n # Ansatz\n it = 0\n start = nr_p - 1\n limit = nr_q\n\n while start != -1:\n\n cq = start\n tq = start + 1\n\n while tq < limit:\n add_GZB(c, cq, tq, np.random.normal(0, 1, 1)[0] * 2 * np.pi)\n cq = cq + 1\n tq = tq + 1\n it = it + 1\n\n start = start - 1\n limit = limit - 1\n\ndef add_CRZ(c, cq, tq, t):\n \"\"\"\n c: 
quantum circuit\n cq: control qubit\n tq: target qubit\n t: theta\n \"\"\"\n\n c.add_gate(\"ZPhase\", tq, phase=t / 2)\n c.add_gate(\"CNOT\", cq, tq)\n c.add_gate(\"ZPhase\", tq, phase=-t / 2)\n c.add_gate(\"CNOT\", cq, tq)\n\n\ndef add_CRX(c, cq, tq, t):\n \"\"\"\n c: quantum circuit\n cq: control qubit\n tq: target qubit\n t: theta\n \"\"\"\n c.add_gate(\"HAD\", tq)\n c.add_gate(\"ZPhase\", tq, phase=t / 2)\n c.add_gate(\"CNOT\", cq, tq)\n c.add_gate(\"ZPhase\", tq, phase=-t / 2)\n c.add_gate(\"CNOT\", cq, tq)\n c.add_gate(\"HAD\", tq)\n\n\ndef add_CRY(c, cq, tq, t):\n \"\"\"\n c: quantum circuit\n cq: control qubit\n tq: target qubit\n t: theta\n \"\"\"\n c.add_gate(\"S\", tq, adjoint=True)\n c.add_gate(\"HAD\", tq)\n add_CRZ(c, cq, tq, t)\n c.add_gate(\"HAD\", tq)\n c.add_gate(\"S\", tq)\n\n\ndef add_GZB(c, q1, q2, t):\n \"\"\"\n c: quantum circuit\n q1: first qubit\n q2: second qubit\n t: theta\n \"\"\"\n \"\"\"\n add_GZB_matrix:\n [[1,0,0,0]\n [0,cos(t),sin(t),0]\n [0,sin(t),-cos(t),0]\n [0,0,0,-1]]\n \"\"\"\n\n c.add_gate(\"CNOT\", q1, q2)\n c.add_gate(\"Z\", q1)\n add_CRY(c, q2, q1, np.pi - 2 * t)\n c.add_gate(\"CNOT\", q1, q2)\n
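\n# Minimal usage sketch (assumes a pyzx-style Circuit, which the add_gate names\n# above suggest -- adapt to whatever circuit object is actually passed in):\n#\n# import pyzx as zx\n# c = zx.Circuit(4)\n# add_ansatz_gzb(c, nr_q=4, nr_p=2)  # qubits 0..1 start in state 1, then random GZB layers\n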
","sub_path":"ansatz_and_gates.py","file_name":"ansatz_and_gates.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"337420751","text":"import unittest\nfrom hello_world import app\nfrom hello_world.formater import SUPPORTED\nimport json\n\n\nclass FlaskrTestCase(unittest.TestCase):\n def setUp(self):\n app.config['TESTING'] = True\n self.app = app.test_client()\n\n def test_outputs(self):\n rv = self.app.get('/outputs')\n s = str(rv.data)\n self.assertIn(','.join(SUPPORTED), s) # actually assert; the bare expression was a no-op\n\n def test_msg_with_output(self):\n rv = self.app.get('/?output=json')\n expected = {}\n expected['imie'] = 'Ania'\n expected['msg'] = 'Hello World'\n actual = json.loads(rv.data)\n self.assertEqual(expected['imie'], actual['imie'])\n","sub_path":"test/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"333256183","text":"__author__ = 'Faiku Fitim, Janusz Gradonski'\n\nfrom raum.Fixstern import *\nfrom raum.Planet import *\nfrom random import *\n\nESCAPE = '\\033'\n\n\nclass Galaxie():\n\n def __init__(self, width, height):\n # Since OpenGL shows the light in object coordinates (as a point), the\n # coordinates have to be passed in here. I simply take the origin and move\n # the light 80 units backwards. OpenGL treats the z-axis as negative, i.e.\n # wherever I want to push something backwards I have to write the positive\n # z-value (only for the light).\n # The second parameter is the light color: here a slightly greenish light\n # with 100 percent alpha\n self.light = Light([0, 0, 0, 80], [0.8, 1.0, 0.8, 1.0])\n self.pos = False # camera view\n self.mod = True # texture mode on or off\n self.lights = True\n self.anim = True\n self.zoom = 0\n self.width = width\n self.height = height\n\n # Initialize all the object attributes empty here, simply because that is\n # what correct Python style calls for.\n self.erdenTextur = None\n self.jupiterTextur = None\n self.marsTextur = None\n self.merkurTextur = None\n self.mondTextur = None\n self.neptunTextur = None\n self.plutoTextur = None\n self.saturnTextur = None\n self.sonnenTextur = None\n self.uranusTextur = None\n self.venusTextur = None\n def loadTextures(self):\n \"\"\"\n Load the textures for the planets.\n \"\"\"\n self.sonnenTextur = Texturen.LoadTexture(\"../data/sonne.jpg\")\n self.mondTextur = Texturen.LoadTexture(\"../data/mond.jpg\")\n self.erdenTextur = Texturen.LoadTexture(\"../data/erde.jpg\")\n self.uranusTextur = Texturen.LoadTexture(\"../data/uranus.jpg\")\n self.jupiterTextur = Texturen.LoadTexture(\"../data/jupiter.jpg\")\n self.marsTextur = Texturen.LoadTexture(\"../data/mars.jpg\")\n self.merkurTextur = Texturen.LoadTexture(\"../data/merkur.jpg\")\n self.neptunTextur = Texturen.LoadTexture(\"../data/neptun.jpg\")\n self.plutoTextur = Texturen.LoadTexture(\"../data/pluto.jpg\")\n self.saturnTextur = Texturen.LoadTexture(\"../data/saturn.jpg\")\n self.venusTextur = Texturen.LoadTexture(\"../data/venus.jpg\")\n\n def pauseAll(self):\n \"\"\"\n Pause all movement.\n \"\"\"\n self.sonne.setAnimation(False)\n\n def playAll(self):\n \"\"\"\n Resume all movement.\n \"\"\"\n self.sonne.setAnimation(True)\n\n def loadPlanets(self):\n \"\"\"\n Create all planets.\n \"\"\"\n # Parameter(Planet): (position, anim, rotation, rotSpeed, rotPoint, movSpeed, radius, textur, divisions, monde)\n # Parameter(Fixstern): (position, rotSpeed, textur, planeten, anim, licht, radius, divisions)\n # Parameter(Mond): (anim, rotation, rotSpeed, parent, entf_rotPoint, movSpeed, radius, textur, divisions)\n # The fixed star is the sun\n self.sonne = Fixstern([0, 0, -80], 0.2, self.sonnenTextur, None, True, self.light, 10, 64)\n\n # Planets\n self.merkur = Planet([-17, 0, -80], True, [90, 0, 0], 0.05, self.sonne.position, 0.0001, 0.4, self.merkurTextur, 32, None)\n self.venus = Planet([-20.5, 0, -80], True, [90, 0, 0], 0.05, self.sonne.position, 0.000125, 1.21, self.venusTextur, 32, None)\n self.erde = Planet([-29, 0, -80], True, [90, 0, 0], 0.6, self.sonne.position, 0.00006, 1.28, self.erdenTextur, 32, None)\n self.mars = Planet([-36.5, 0, -80], True, [90, 0, 0], 0.05, self.sonne.position, 0.00009, 0.6, self.marsTextur, 32, None)\n self.jupiter = Planet([-59, 0, -80], True, [90, 0, 0], 0.05, self.sonne.position, 0.000035, 14.3, self.jupiterTextur, 32, None)\n self.saturn = Planet([-85, 0, -80], True, [90, 0, 0], 0.05, self.sonne.position, 0.000056, 12.05, self.saturnTextur, 32, None)\n self.uranus = Planet([-98, 0, -80], True, [90, 0, 0], 0.05, self.sonne.position, 0.00004, 5.11, self.uranusTextur, 32, None)\n self.neptun = Planet([-115, 0, -80], True, [90, 0, 0], 0.05, self.sonne.position, 0.000087, 4.95, self.neptunTextur, 32, None)\n self.pluto = Planet([-125, 0, -80], True, [90, 0, 0], 0.05, self.sonne.position, 0.000025, 0.1, self.plutoTextur, 32, None)\n\n # Moons\n self.mond = Mond(True, [-90, 0, -80], 0, self.erde, 5, -0.00005, 0.4, self.mondTextur, 24)\n\n 
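# Wire up the scene graph: the moon orbits the earth, and every planet orbits the sun.\n 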
self.erde.addMond(self.mond)\n self.sonne.addPlanet(self.erde)\n self.sonne.addPlanet(self.jupiter)\n self.sonne.addPlanet(self.mars)\n self.sonne.addPlanet(self.merkur)\n self.sonne.addPlanet(self.neptun)\n self.sonne.addPlanet(self.pluto)\n self.sonne.addPlanet(self.saturn)\n self.sonne.addPlanet(self.uranus)\n self.sonne.addPlanet(self.venus)\n\n def enableTextures(self):\n \"\"\"\n Enable textures.\n \"\"\"\n glEnable(GL_TEXTURE_2D)\n\n def disableTextures(self):\n \"\"\"\n Disable textures - everything is drawn white.\n \"\"\"\n glDisable(GL_TEXTURE_2D)\n\n def init(self, Width, Height):\n \"\"\"\n Prepare the screen.\n \"\"\"\n glClearColor(0.0, 0.0, 0.0, 0.0) # background color\n glClearDepth(1.0) # clear the depth buffer\n glDepthFunc(GL_LESS) # The Type Of Depth Test To Do\n glEnable(GL_DEPTH_TEST) # Enables Depth Testing\n glShadeModel(GL_SMOOTH) # Enables Smooth Color Shading\n\n glMatrixMode(GL_PROJECTION) # Reset The Projection Matrix\n glLoadIdentity()\n\n # Calculate The Aspect Ratio Of The Window\n gluPerspective(45.0, float(Width) / float(Height), 0.1, 100000.0)\n glMatrixMode(GL_MODELVIEW)\n\n glutKeyboardFunc(self.keyPressed) # enable keyboard control\n\n # Initialize the light\n self.light.init()\n self.loadTextures()\n self.loadPlanets()\n self.enableTextures()\n\n \"\"\"\n When the window is resized\n \"\"\"\n def ReSizeGLScene(self, Width, Height):\n \"\"\"\n Adapt the galaxy when the window size changes.\n \"\"\"\n # If the window is too small, raise the height to 1\n if Height == 0:\n Height = 1\n\n glViewport(0, 0, Width, Height) # Reset The Current Viewport And Perspective Transformation\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n\n # Perspective\n gluPerspective(50.0, float(Width) / float(Height), 0.1, 100000.0) # near plane\n glMatrixMode(GL_MODELVIEW)\n\n self.width = Width\n self.height = Height\n\n \"\"\"\n Draw the scene\n \"\"\"\n def DrawGLScene(self):\n \"\"\"\n Draw all components.\n :return:\n \"\"\"\n self.update()\n\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) # clear the screen and the depth buffer\n\n self.sonne.draw(self.pos, self.zoom)\n\n\n glutSwapBuffers() # draw\n\n def update(self):\n \"\"\"\n Update all components.\n :return:\n \"\"\"\n self.sonne.update()\n\n\n def keyPressed(self, *args):\n \"\"\"\n Handle the keyboard commands.\n :param args: incoming key command\n :return:\n \"\"\"\n if args[0] == b'p':\n if self.anim:\n self.pauseAll()\n self.anim = False\n else:\n self.playAll()\n self.anim = True\n\n if args[0] == b'1':\n if self.pos:\n self.zoom = 0\n self.pos = False\n if args[0] == b'2':\n self.zoom = 0\n self.pos = True\n\n if args[0] == b't':\n if self.mod:\n self.disableTextures()\n self.mod = False\n else:\n self.enableTextures()\n self.mod = True\n\n if args[0] == b'l':\n if self.lights:\n self.sonne.disableLight()\n self.lights = False\n else:\n self.sonne.enableLight()\n self.lights = True\n\n if args[0] == b's':\n self.sonne.animateAllChildrenFaster(0.05, 0.0001)\n\n if args[0] == b'd':\n self.sonne.animateAllChildrenSlower(0.05, 0.0001)\n\n if args[0] == b'+':\n self.zoom += 5\n\n if args[0] == b'-':\n self.zoom -= 5","sub_path":"raum/Galaxie.py","file_name":"Galaxie.py","file_ext":"py","file_size_in_byte":8607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"498963816","text":"# -*- coding: utf-8 -*-\n#############################################################################\n# 
__________ #\n# __ __/ ____/ __ \\__ __ This file is part of MicroGP4 v1.0 \"Kiwi\" #\n# / / / / / __/ /_/ / // / (!) by Giovanni Squillero and Alberto Tonda #\n# / /_/ / /_/ / ____/ // /_ https://github.com/squillero/microgp4 #\n# \\__ /\\____/_/ /__ __/ #\n# /_/ --MicroGP4-- /_/ \"You don't need a big goal, be μ-ambitious!!\" #\n# #\n#############################################################################\n\n# Copyright 2020 Giovanni Squillero and Alberto Tonda\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .fitness.base import is_dominated\nfrom .individual import Individual\n\n\nclass Archive:\n \"\"\"This class manages the set of individuals not dominated by all other\n individuals currently or previously contained in the\n :mod:`microgp.darwin.Darwin._population`.\n\n **Examples:**\n\n - Try to insert an individual in the archive:\n\n >>> self._archive += individual\n\n The individual will be inserted only if it is not dominated by all\n individual already inside the archive. If it is not dominated then the\n individuals that just became dominated are removed from the archive.\n \"\"\"\n\n def __init__(self) -> None:\n \"\"\"Archive builder\"\"\"\n self._individuals = set()\n\n def __iadd__(self, candidate_individual: Individual) -> 'Archive':\n \"\"\"Add to the archive a new individual if it is not dominated by any\n of the individuals already present. 
The individuals that just became\n dominated are removed from the archive.\n\n Args:\n candidate_individual: Individual to be inserted into the archive\n\n Returns:\n The archive just updated\n \"\"\"\n if all(not is_dominated(e.fitness, candidate_individual.fitness) for e in self._individuals):\n self._individuals = {candidate_individual} | {\n e for e in set(self._individuals) if not is_dominated(candidate_individual.fitness, e.fitness)\n }\n return self\n\n def __str__(self) -> str:\n \"\"\"Return a string with the phenotypes of all individuals inside the\n archive\"\"\"\n if not self._individuals:\n return \"No individuals in archive\"\n string = \"\\n;\" + \"-\" * 76\n for individual in self._individuals:\n string += \"\\n\" + str(individual) + \"\\n With score of: \" + str(individual.fitness)\n string += \"\\n;\" + \"-\" * 76\n return string\n\n @property\n def individuals(self):\n return self._individuals\n","sub_path":"src/microgp/archive.py","file_name":"archive.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"596236054","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport math as mt\nfrom minepy import MINE\n\ndef corx(N, rho = 0.5):\n r = np.random.randn(N,2)\n r1 = r[:,0]\n r11 = r[:,1]\n r2 = rho*r1 + (1-rho**2)**0.5 * r11\n return r1,r2\n\ndef realm(x):\n\n y = [ -0.5* mt.log(1-i**2,2) for i in x]\n return y\n\n\nmn = MINE(alpha=0.6, c=15)\nx = np.linspace(0.01, 0.99,20)\ny = realm(x)\nN=2000\nres=[]\nfor i in xrange(len(x)):\n r1,r2 = corx(N,x[i])\n mn.compute_score(r1,r2)\n res.append(mn.mic())\n print(i)\n","sub_path":"code/toolset/test/mi_test.py","file_name":"mi_test.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"130981897","text":"def cast_to_numeric(string):\n if string.find('.') == -1:\n return int(string)\n return float(string)\n\n\ndef read_data(file_name):\n with open(file_name, 'r') as file:\n file.readline()\n content = file.readline().strip()\n array = content.split()\n return [*map(cast_to_numeric, array)]\n","sub_path":"simple_sorting/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}